{
"source": "1753839720/-",
"score": 3
}
#### File: 1753839720/-/day04.py
```python
# 3. Read numbers and count how many times each appears
# c=[2,5,6,5,4,3,23,43,2]
# def num(n):
# shu=[]
# for i in n:
# b=0
# for a in n:
# if a == i:
# b+=1
# cs=str(a)+' appeared '+str(b)+' times'
# shu.append(cs)
# zong=set(shu)
# z_=sorted((list(zong)),key=lambda x:(len(x),x[0]) )
# for i_ in z_:
# print(i_)
# num(sorted(c))
# 4. Analyze scores
# s = input('>>').split()
# def sorce(s):
# l1=[]
# i=0
# a=0
# b=0
# for _ in s:
# i +=1
# _ = int(_)
# l1.append(_)
# P = sum(l1)/i
# for _ in l1:
# if _ >= P:
# a+=1
# else:
# b+=1
# print(a,b)
# sorce(s)
# 5. Count occurrences of 100 random numbers from 0-9 (the code below actually draws 1000)
# import random
# def Tj():
# i_=[]
# counts=[0,0,0,0,0,0,0,0,0,0]
# for _ in range(0,1000):
# a = random.randint(0,9)
# counts[a] = counts[a]+1
# for i in range(0,10):
# i_.append(i)
# for i in i_:
# print(i, 'appeared', counts[i], 'times')
# Tj()
# 6. Index of the smallest element
# def indexOfSmallestElement(lst):
# l1=[]
# i=1
# a=0
# for _ in lst:
# _ = int(_)
# l1.append(_)
# while i<len(l1):
# if l1[i]<l1[a]:
# a=i
# i+=1
# print(a)
# indexOfSmallestElement([2,5,4,9,2,8,])
# 7. Shuffle a list without using random.shuffle
# import random
# b={}
# def shuffle(n):
# global b
# c=[]
# while len(b)<=len(n)-1:
# a=random.choice(n)
# c.append(a)
# b=set(c)
# d=list(b)
# d.sort(key=c.index)
# print(d)
# shuffle([0,1,2,3,4,5,6,7,8,9])
# 8. Eliminate duplicates
# def eliminateDuplicates(lst):
# l1=set(lst)
# print(list(l1))
# eliminateDuplicates([1,1,2,5,5,6,4,8])
# 9. Sorting problem (check whether a list is already sorted)
# def isSorted(lst):
# l1=[]
# for _ in lst:
# l1.append(_)
# l1 = sorted(l1)
# if l1==lst:
# print('yes')
# return True
# else:
# print('no')
# isSorted([1,1,3,4,4,5,7,9,10,30,11])
# 10. Bubble sort
# def Mp(lst):
# b=0
# n=len(lst)
# for _ in lst:
# for i in range(0,n-b-1):
# if lst[i]>lst[i+1]:
# lst[i],lst[i+1]=lst[i+1],lst[i]
# b+=1
# print(lst)
# Mp([2,6,4,8,9,7,1,3,0,5])
# 11. Coupon problem (did not understand it)
# def huase()
# 12. Detect four consecutive equal numbers
# def isConsecutiveFour(n):
# d=[]
# for i in str(n):
# b=0
# for a in str(n):
# if a==i:
# b+=1
# d.append(b)
# e=set(d)
# if 4 in e:
# e.remove(4)
# print('yes')
# isConsecutiveFour([666644441])
# 1. Validate an SSN
# import re
# def ssn(n):
# a=re.sub(r'\D','',n)  # strip non-digit characters
# if len(a) == 9:
# print('Valid SSN')
# else:
# print('Invalid SSN')
# ssn('111-11-1111')
# 2. Check whether the first string is a substring of the second
# def chuan(s,n):
# a=n.find(s)
# if a>=0:
# print('yes')
# else:
# print('no')
# chuan('1','123')
# 3. Password validity (at least 8 characters, letters and digits only, at least two digits)
# import re
# def mima(n):
# a=re.sub(r'\D','',n)  # keep only the digits
# print(a)
# if len(n)>=8 and n.isalnum() == True and len(a)>=2:
# print('valid password')
# else:
# print('invalid password')
# mima('12aab<PASSWORD>')
# 4. Input a string and display how many times each letter appears
# def countletters(s):
# zi=[]
# for i in s:
# b=0
# for a in s:
# if a == i:
# b+=1
# zm=str(a)+' appeared '+str(b)+' times'
# zi.append(zm)
# zi_=set(zi)
# zi__=sorted(list(zi_))
# for i_ in zi__:
# print(i_)
# countletters('icosnvowvinwbeuiboivn')
# 5. Convert letters to digits by phone-keypad position (should cover upper and lower case; too lazy to write them all)
# def getNumber(n):
# for i in n:
# if i=='a' or i=='A' or i=='b' or i=="B" or i=='c' or i=='C':
# print('2',end='')
# elif i=='d'or i=='e' or i=='f'or i=='F':
# print('3',end='')
# elif i=='g'or i=='h' or i=='i':
# print('4',end='')
# elif i=='j' or i=='k' or i=='l':
# print('5',end='')
# elif i=='m' or i=='n' or i=='o':
# print('6',end='')
# elif i=='p' or i=='q' or i=='r' or i=='s':
# print('7',end='')
# elif i=='t' or i=='u' or i=='v':
# print('8',end='')
# elif i=='w' or i=='x' or i=='y' or i=='z':
# print('9',end='')
# else :
# print(i,end='')
# print()
# getNumber('1-800-Flowers')
# getNumber('1800flowers')
# 6. Reverse a string
# def reverse(s):
# s_=s[::-1]
# print(s_)
# reverse('000dvnnoinvisn')
# 7. Rewrite exercise 6.29, but there is no 6.29
# 8. ISBN-13: compute and append the check digit
# def isbn(n):
# b=[]
# for i in str(n):
# b.append(i)
# b_=[int(x) for x in b ]
# c=10-(b_[0]+3*b_[1]+b_[2]+3*b_[3]+b_[4]+3*b_[5]+b_[6]+3*b_[7]+b_[8]+3*b_[9]+b_[10]+3*b_[11])%10
# if c==10:
# print(str(n)+'0')
# else:
# print(str(n)+str(c))
# isbn(978013213080)
# isbn(978013213079)
```
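The exercises above count occurrences with nested loops. As a point of comparison (not part of the original file), a minimal sketch of exercise 3 using the standard library's collections.Counter:

```python
# Hypothetical rewrite of exercise 3 above; same output idea, one pass over the data.
from collections import Counter

def num(values):
    for value, count in sorted(Counter(values).items()):
        print(f"{value} appeared {count} times")

num([2, 5, 6, 5, 4, 3, 23, 43, 2])
```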
{
"source": "17565/game-review-web-app",
"score": 3
}
#### File: 17565/game-review-web-app/index.py
```python
from flask import Flask, render_template, request, url_for
from flask_sqlalchemy import SQLAlchemy
from werkzeug.utils import redirect
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///games.db'
db = SQLAlchemy(app)
#game table
class Game(db.Model):
id = db.Column(db.Integer, primary_key=True)
game = db.Column(db.String(50))
image = db.Column(db.String(50))
release_date = db.Column(db.String(50))
gener_id = db.Column(db.Integer, db.ForeignKey('gener.id'),
nullable=False)
publisher_id = db.Column(db.Integer, db.ForeignKey('publisher.id'),
nullable=False)
developer_id = db.Column(db.Integer, db.ForeignKey('developer.id'),
nullable=False)
reviews = db.relationship('Review', backref='game', lazy=True)
#review table
class Review(db.Model):
id = db.Column(db.Integer, primary_key=True)
review = db.Column(db.String(50))
game_id = db.Column(db.Integer, db.ForeignKey('game.id'),
nullable=False)
# gener table ('gener' is used throughout this app where 'genre' is meant)
class Gener(db.Model):
id = db.Column(db.Integer, primary_key=True)
gener = db.Column(db.String(50))
games = db.relationship('Game', backref='gener', lazy=True)
#publisher table
class Publisher(db.Model):
id = db.Column(db.Integer, primary_key=True)
publisher = db.Column(db.String(50))
publisher_id = db.relationship('Game', backref='Publisher', lazy=True)
#developer table
class Developer(db.Model):
id = db.Column(db.Integer, primary_key=True)
developer = db.Column(db.String(50))
developer_id = db.relationship('Game', backref='Developer', lazy=True)
# @app.route()
@app.route("/games")
def games():
results = Game.query.all()
return render_template("my_games.html" , results=results)
#displays page
@app.route("/")
def home():
return render_template ("home_page.html")
#Add review page
@app.route("/add", methods=["POST", "GET"])
def add_review():
if request.method == "POST":
new_review = Review()
new_review.game_id = request.form.get('game')
new_review.review = request.form.get('review')
db.session.add(new_review)
db.session.commit()
return redirect('/add')
games = Game.query.all()
return render_template ("add_review.html", games=games)
@app.route("/remove", methods=["POST"])
def remove_review():
if request.method == "POST":
print(request.form.get('review'))
review = Review.query.filter_by(id=request.form.get('review')).first()
print(review)
db.session.delete(review)
db.session.commit()
return redirect(url_for('games'))
@app.route("/individuals/<int:id>")
def individuals(id):
game = Game.query.filter_by(id = id).first()
gener = Gener.query.filter_by(id=game.gener_id).first()
publisher = Publisher.query.filter_by(id=game.publisher_id).first()
developer = Developer.query.filter_by(id=game.developer_id).first()
return render_template("individual_game_page.html" , game=game,gener=gener,publisher=publisher,developer=developer)
#runs app
if __name__ == "__main__":
app.run(debug=True)
```
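index.py above queries games.db but never creates it; the tables and seed rows are assumed to exist already. A minimal one-off setup sketch (a hypothetical create_db.py next to index.py, assuming Flask-SQLAlchemy 2.x, where create_all() works with the app bound at construction):

```python
# Hypothetical seed script; model and column names match index.py above.
from index import db, Game, Gener, Publisher, Developer

db.create_all()  # creates games.db with the Game/Review/Gener/Publisher/Developer tables

gener = Gener(gener="RPG")
publisher = Publisher(publisher="Example Publisher")
developer = Developer(developer="Example Studio")
db.session.add_all([gener, publisher, developer])
db.session.commit()  # commit first so the three rows receive ids

db.session.add(Game(game="Example Game", image="example.png", release_date="2022-01-01",
                    gener_id=gener.id, publisher_id=publisher.id, developer_id=developer.id))
db.session.commit()
```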
{
"source": "1757WestwoodRobotics/2022-RapidReact",
"score": 3
}
#### File: 2022-RapidReact/commands/absoluterelativedrive.py
```python
from math import atan2
import typing
from commands2 import CommandBase
from wpimath.controller import PIDController
from wpimath.geometry import Rotation2d
from subsystems.drivesubsystem import DriveSubsystem
from util.angleoptimize import optimizeAngle
import constants
class AbsoluteRelativeDrive(CommandBase):
def __init__(
self,
drive: DriveSubsystem,
forward: typing.Callable[[], float],
sideways: typing.Callable[[], float],
rotationX: typing.Callable[[], float],
rotationY: typing.Callable[[], float],
) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.drive = drive
self.forward = forward
self.sideways = sideways
self.rotationPid = PIDController(
constants.kRotationPGain, constants.kRotationIGain, constants.kRotationDGain
)
self.rotationY = rotationY
self.rotationX = rotationX
self.addRequirements([self.drive])
def rotation(self) -> float:
targetRotation = atan2(
self.rotationX(), self.rotationY()
) # rotate to be relative to driver
if self.rotationX() == 0 and self.rotationY() == 0:
return 0
return self.rotationPid.calculate(
self.drive.getRotation().radians(),
optimizeAngle(
self.drive.getRotation(), Rotation2d(targetRotation)
).radians(),
)
def execute(self) -> None:
self.drive.arcadeDriveWithFactors(
self.forward(),
self.sideways(),
self.rotation(),
DriveSubsystem.CoordinateMode.FieldRelative,
)
```
#### File: commands/auto/fourblnoninvasive.py
```python
from os import path
from commands2 import ParallelCommandGroup, SequentialCommandGroup, WaitCommand
from wpimath.trajectory import TrajectoryConfig, TrajectoryUtil
from subsystems.drivesubsystem import DriveSubsystem
from subsystems.intakesubsystem import IntakeSubsystem
from subsystems.indexersubsystem import IndexerSubsystem
from subsystems.shootersubsystem import ShooterSubsystem
from commands.resetdrive import ResetDrive
from commands.intake.deployintake import DeployIntake
from commands.intake.retractintake import RetractIntake
from commands.indexer.feedforward import FeedForward
from commands.indexer.holdball import HoldBall
from commands.followtrajectory import FollowTrajectory
from commands.shooter.aimshootertotarget import AimShooterToTarget
import constants
class FourBLNoninvasiveMovements(SequentialCommandGroup):
def __init__(
self, drive: DriveSubsystem, intake: IntakeSubsystem, indexer: IndexerSubsystem
):
trajectoryConfig = TrajectoryConfig(
constants.kMaxForwardLinearVelocity, constants.kMaxForwardLinearAcceleration
)
trajectoryConfig.setKinematics(drive.kinematics)
pathA = TrajectoryUtil.fromPathweaverJson(
path.join(
path.dirname(path.realpath(__file__)),
"..",
"..",
"deploy",
"pathplanner",
"generatedJSON",
"4bL-noninvasive-a.wpilib.json",
)
)
pathB = TrajectoryUtil.fromPathweaverJson(
path.join(
path.dirname(path.realpath(__file__)),
"..",
"..",
"deploy",
"pathplanner",
"generatedJSON",
"4bL-noninvasive-b.wpilib.json",
)
)
pathC = TrajectoryUtil.fromPathweaverJson(
path.join(
path.dirname(path.realpath(__file__)),
"..",
"..",
"deploy",
"pathplanner",
"generatedJSON",
"4bL-noninvasive-c.wpilib.json",
)
)
super().__init__(
ResetDrive(drive, pathA.initialPose()),
DeployIntake(intake),
FollowTrajectory(drive, pathA), # pickup ball 2
RetractIntake(intake),
WaitCommand(constants.kAutoTimeFromStopToShoot),
FeedForward(indexer), # shoot balls 1 and 2
WaitCommand(constants.kAutoTimeFromShootToMove),
DeployIntake(intake),
HoldBall(indexer),
FollowTrajectory(drive, pathB), # move through hangar to terminal
RetractIntake(intake),
FollowTrajectory(drive, pathC), # move into shooting range
WaitCommand(constants.kAutoTimeFromStopToShoot),
FeedForward(indexer),
WaitCommand(constants.kAutoTimeFromShootToMove),
HoldBall(indexer),
)
class FourBLNoninvasive(ParallelCommandGroup):
def __init__(
self,
shooter: ShooterSubsystem,
drive: DriveSubsystem,
intake: IntakeSubsystem,
indexer: IndexerSubsystem,
):
self.setName(__class__.__name__)
super().__init__(
AimShooterToTarget(shooter),
FourBLNoninvasiveMovements(drive, intake, indexer),
)
```
#### File: commands/climber/holdcimbersposition.py
```python
from commands2 import CommandBase, ParallelCommandGroup
from subsystems.climbers.leftclimbersubsystem import LeftClimber
from subsystems.climbers.rightclimbersubsystem import RightClimber
class HoldLeftClimberPosition(CommandBase):
def __init__(self, climber: LeftClimber) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.climber = climber
self.addRequirements([self.climber])
def initialize(self) -> None:
self.climber.leftClimber.climberMotor.neutralOutput()
self.climber.leftClimber.activateBrake()
class HoldRightClimberPosition(CommandBase):
def __init__(self, climber: RightClimber) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.climber = climber
self.addRequirements([self.climber])
def initialize(self) -> None:
self.climber.rightClimber.climberMotor.neutralOutput()
self.climber.rightClimber.activateBrake()
class HoldBothClimbersPosition(ParallelCommandGroup):
def __init__(self, leftClimber: LeftClimber, rightClimber: RightClimber):
super().__init__(
HoldLeftClimberPosition(leftClimber),
HoldRightClimberPosition(rightClimber),
)
self.setName(__class__.__name__)
```
#### File: 2022-RapidReact/commands/followtrajectory.py
```python
from math import pi
from commands2 import Swerve4ControllerCommand
from wpimath.controller import (
PIDController,
ProfiledPIDControllerRadians,
)
from wpimath.trajectory import Trajectory, TrapezoidProfileRadians
from subsystems.drivesubsystem import DriveSubsystem
import constants
class FollowTrajectory(Swerve4ControllerCommand):
def __init__(self, drive: DriveSubsystem, trajectory: Trajectory) -> None:
self.drive = drive
self.xController = PIDController(
constants.kTrajectoryPositionPGain,
constants.kTrajectoryPositionIGain,
constants.kTrajectoryPositionDGain,
)
self.yController = PIDController(
constants.kTrajectoryPositionPGain,
constants.kTrajectoryPositionIGain,
constants.kTrajectoryPositionDGain,
)
self.thetaController = ProfiledPIDControllerRadians(
constants.kTrajectoryAnglePGain,
constants.kTrajectoryAngleIGain,
constants.kTrajectoryAngleDGain,
TrapezoidProfileRadians.Constraints(
constants.kMaxRotationAngularVelocity,
constants.kMaxRotationAngularAcceleration,
),
)
self.thetaController.enableContinuousInput(-pi, pi)
super().__init__(
trajectory,
self.drive.getPose,
self.drive.kinematics,
self.xController,
self.yController,
self.thetaController,
self.drive.applyStates,
[self.drive],
)
def end(self, _interrupted: bool) -> None:
self.drive.arcadeDriveWithFactors(
0, 0, 0, DriveSubsystem.CoordinateMode.RobotRelative
)
```
#### File: commands/intake/autoballintake.py
```python
from commands2 import CommandBase
from networktables import NetworkTables
from wpimath.trajectory import TrapezoidProfile, TrapezoidProfileRadians
from wpimath.controller import ProfiledPIDController, ProfiledPIDControllerRadians
from wpimath.geometry import Rotation2d
from wpilib import SmartDashboard
import constants
from subsystems.drivesubsystem import DriveSubsystem
from subsystems.intakesubsystem import IntakeSubsystem
from util import convenientmath
class AutoBallIntake(CommandBase):
def __init__(self, drive: DriveSubsystem, intake: IntakeSubsystem) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.drive = drive
self.intake = intake
self.distanceController = ProfiledPIDController(
constants.kDriveToBallPGain,
constants.kDriveToBallIGain,
constants.kDriveToBallDGain,
TrapezoidProfile.Constraints(
constants.kMaxForwardLinearVelocity,
constants.kMaxForwardLinearAcceleration,
),
)
self.ballAngle = Rotation2d()
self.distanceController.setTolerance( # These constants are reusable and likely will not need to change
constants.kDriveToTargetDistanceTolerance,
constants.kDriveToTargetLinearVelocityTolerance,
)
self.ballDistance = 0
self.addRequirements([self.drive, self.intake])
self.angleController = ProfiledPIDControllerRadians(
constants.kDriveToTargetAnglePGain,
constants.kDriveToTargetAngleIGain,
constants.kDriveToTargetAngleDGain,
TrapezoidProfileRadians.Constraints(
constants.kMaxRotationAngularVelocity,
constants.kMaxRotationAngularAcceleration,
),
)
self.angleController.setTolerance( # bet you can't guess what's copy/pasted
constants.kDriveToTargetAngleTolerance,
constants.kDriveToTargetAngularVelocityTolerance,
)
self.angleController.enableContinuousInput(
-1 * constants.kRadiansPerRevolution / 2,
constants.kRadiansPerRevolution / 2,
)
def updateDistanceToGoal(self) -> None:
if not (
SmartDashboard.getBoolean(
constants.kBallAngleRelativeToRobotKeys.validKey, False
)
and SmartDashboard.getBoolean(
constants.kBallDistanceRelativeToRobotKeys.validKey, False
)
):
self.ballDistance = 0
self.ballAngle = Rotation2d()
return
self.ballDistance = SmartDashboard.getNumber(
constants.kBallDistanceRelativeToRobotKeys.valueKey, 0
)
self.ballAngle = Rotation2d(
SmartDashboard.getNumber(
constants.kBallAngleRelativeToRobotKeys.valueKey, 0
)
)
def initialize(self) -> None:
CommandBase.initialize(self)
self.updateDistanceToGoal()
self.distanceController.reset(self.ballDistance)
self.angleController.reset(self.ballAngle.radians())
def execute(self) -> None:
if NetworkTables.getTable(constants.kLimelightCargoNetworkTableName).getBoolean(
constants.kLimelightTargetValidKey, False
):
self.updateDistanceToGoal()
distanceControllerOutput = self.distanceController.calculate(
-1 * self.ballDistance, 0
)
angleControllerOutput = self.angleController.calculate(
-1 * self.ballAngle.radians(), 0
)
distanceControllerAxisOutputs = (
convenientmath.translationFromDistanceAndRotation(
distanceControllerOutput, self.ballAngle
)
)
self.drive.arcadeDriveWithFactors(
distanceControllerAxisOutputs.X(),
distanceControllerAxisOutputs.Y(),
angleControllerOutput,
DriveSubsystem.CoordinateMode.RobotRelative,
)
self.intake.deployIntake()
else:
self.intake.retractIntake()
def end(self, _interrupted: bool) -> None:
self.drive.arcadeDriveWithFactors(
0, 0, 0, DriveSubsystem.CoordinateMode.RobotRelative
)
self.intake.retractIntake()
def isFinished(self) -> bool:
return self.distanceController.atGoal() and self.angleController.atGoal()
```
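execute() above turns the distance controller's scalar output into x/y drive factors via util.convenientmath.translationFromDistanceAndRotation, whose source is not included here. A sketch of the assumed polar-to-cartesian behaviour, with one worked number:

```python
from wpimath.geometry import Rotation2d, Translation2d

# Assumed behaviour of translationFromDistanceAndRotation (not the repo's actual code):
def translationFromDistanceAndRotation(distance: float, rotation: Rotation2d) -> Translation2d:
    return Translation2d(distance * rotation.cos(), distance * rotation.sin())

# A controller output of 2.0 toward a ball 30 degrees to the left splits into
# forward and sideways components:
t = translationFromDistanceAndRotation(2.0, Rotation2d.fromDegrees(30))
print(round(t.X(), 2), round(t.Y(), 2))  # 1.73 1.0
```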
#### File: 2022-RapidReact/commands/normalballpath.py
```python
from commands2 import ParallelCommandGroup
from commands.indexer.holdball import HoldBall
from commands.intake.deployintake import DeployIntake
from subsystems.indexersubsystem import IndexerSubsystem
from subsystems.intakesubsystem import IntakeSubsystem
class NormalBallPath(ParallelCommandGroup):
def __init__(self, intake: IntakeSubsystem, indexer: IndexerSubsystem):
super().__init__(HoldBall(indexer), DeployIntake(intake))
self.setName(__class__.__name__)
```
#### File: 2022-RapidReact/commands/shootball.py
```python
from commands2 import ParallelCommandGroup
from commands.indexer.feedforward import FeedForward
from subsystems.indexersubsystem import IndexerSubsystem
class ShootBall(ParallelCommandGroup):
def __init__(self, indexer: IndexerSubsystem):
super().__init__(FeedForward(indexer))
self.setName(__class__.__name__)
```
#### File: commands/shooter/aimshootermanual.py
```python
from commands2 import CommandBase
from wpilib import SmartDashboard
from wpimath.geometry import Rotation2d
from subsystems.shootersubsystem import ShooterSubsystem
import constants
class AimShooterManually(CommandBase):
def __init__(self, shooter: ShooterSubsystem) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.shooter = shooter
self.addRequirements([self.shooter])
SmartDashboard.putNumber(constants.kShootingHoodAngleKey, 8)
def execute(self) -> None:
wheelSpeed = SmartDashboard.getNumber(constants.kShootingWheelSpeedKey, 550)
hoodAngle = SmartDashboard.getNumber(constants.kShootingHoodAngleKey, 8)
turretPosition = SmartDashboard.getNumber(constants.kShootingTurretAngleKey, 0)
self.shooter.setWheelSpeed(wheelSpeed)
self.shooter.setHoodAngle(Rotation2d.fromDegrees(hoodAngle))
self.shooter.rotateTurret(Rotation2d.fromDegrees(turretPosition))
```
#### File: commands/shooter/stopaimsystem.py
```python
from commands2 import CommandBase, ParallelCommandGroup
from wpimath.geometry import Rotation2d
from subsystems.shootersubsystem import ShooterSubsystem
from subsystems.indexersubsystem import IndexerSubsystem
from commands.indexer.stopindexer import StopIndexer
class StopAimSystem(CommandBase):
def __init__(self, shooter: ShooterSubsystem) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.shooter = shooter
self.addRequirements([self.shooter])
def execute(self) -> None:
self.shooter.shootingMotor.neutralOutput()
self.shooter.rotateTurret(Rotation2d.fromDegrees(0))
class StopMovingParts(ParallelCommandGroup):
def __init__(self, indexer: IndexerSubsystem, shooter: ShooterSubsystem):
super().__init__(StopIndexer(indexer), StopAimSystem(shooter))
self.setName(__class__.__name__)
# pylint: disable-next=no-self-use
def isFinished(self) -> bool:
return False
```
#### File: 1757WestwoodRobotics/2022-RapidReact/robot.py
```python
import typing
import wpilib
import commands2
from robotcontainer import RobotContainer
class MentorBot(commands2.TimedCommandRobot):
"""
Our default robot class, pass it to wpilib.run
Command v2 robots are encouraged to inherit from TimedCommandRobot, which
has an implementation of robotPeriodic which runs the scheduler for you
"""
autonomousCommand: typing.Optional[commands2.Command] = None
def __init__(self):
super().__init__()
self.container = None
def robotInit(self) -> None:
"""
This function is run when the robot is first started up and should be used for any
initialization code.
"""
# Instantiate our RobotContainer. This will perform all our button bindings, and put our
# autonomous chooser on the dashboard.
self.container = RobotContainer()
def disabledInit(self) -> None:
"""This function is called once each time the robot enters Disabled mode."""
def disabledPeriodic(self) -> None:
"""This function is called periodically when disabled"""
def autonomousInit(self) -> None:
"""This autonomous runs the autonomous command selected by your RobotContainer class."""
self.autonomousCommand = self.container.getAutonomousCommand()
if self.autonomousCommand:
self.autonomousCommand.schedule()
def autonomousPeriodic(self) -> None:
"""This function is called periodically during autonomous"""
def teleopInit(self) -> None:
# This makes sure that the autonomous stops running when
# teleop starts running. If you want the autonomous to
# continue until interrupted by another command, remove
# this line or comment it out.
if self.autonomousCommand:
self.autonomousCommand.cancel()
def teleopPeriodic(self) -> None:
"""This function is called periodically during operator control"""
# pylint: disable-next=no-self-use
def testInit(self) -> None:
# Cancels all running commands at the start of test mode
commands2.CommandScheduler.getInstance().cancelAll()
if __name__ == "__main__":
wpilib.run(MentorBot)
```
#### File: subsystems/climbers/leftclimbersubsystem.py
```python
from commands2 import SubsystemBase
from wpilib import SmartDashboard
from subsystems.climbers.climbersubsystem import ClimberModule
import constants
class LeftClimber(SubsystemBase):
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.setName(__class__.__name__)
self.leftClimber = ClimberModule(
constants.kLeftClimberMotorName,
constants.kLeftClimberMotorCanID,
constants.kClimberMotorPGain,
constants.kClimberMotorIGain,
constants.kClimberMotorDGain,
constants.kLeftClimberBrakePCMID,
constants.kLeftClimberPivotSolenoidForwardActuationID,
constants.kLeftClimberPivotSolenoidBackwardActuationID,
constants.kLeftClimberInverted,
)
def periodic(self) -> None:
SmartDashboard.putNumber(
constants.kLeftClimberEncoderTicksKey,
self.leftClimber.climberMotor.getSelectedSensorPosition(),
)
```
#### File: subsystems/climbers/rightclimbersubsystem.py
```python
from commands2 import SubsystemBase
from wpilib import SmartDashboard
from subsystems.climbers.climbersubsystem import ClimberModule
import constants
class RightClimber(SubsystemBase):
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.setName(__class__.__name__)
self.rightClimber = ClimberModule(
constants.kRightClimberMotorName,
constants.kRightClimberMotorCanID,
constants.kClimberMotorPGain,
constants.kClimberMotorIGain,
constants.kClimberMotorDGain,
constants.kRightClimberBrakePCMID,
constants.kRightClimberPivotSolenoidForwardActuationID,
constants.kRightClimberPivotSolenoidBackwardActuationID,
)
def periodic(self) -> None:
SmartDashboard.putNumber(
constants.kRightClimberEncoderTicksKey,
self.rightClimber.climberMotor.getSelectedSensorPosition(),
)
```
#### File: 2022-RapidReact/subsystems/shootersubsystem.py
```python
from commands2 import SubsystemBase
from wpilib import SmartDashboard, RobotBase
from wpimath.geometry import Rotation2d
from ctre import ControlMode, NeutralMode
from util.angleoptimize import optimizeAngle
from util.convenientmath import map_range
from util.ctrecheck import ctreCheckError
from util.simfalcon import createMotor
import constants
class ShooterSubsystem(SubsystemBase):
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.setName(__class__.__name__)
# actuators
# TURRET
self.turretMotor = createMotor(constants.kTurretMotorId)
print(f"Initializing Falcon: {constants.kTurretMotorName}")
if not ctreCheckError(
"configFactoryDefault",
self.turretMotor.configFactoryDefault(constants.kConfigurationTimeoutLimit),
):
return
if not ctreCheckError(
"config_kP",
self.turretMotor.config_kP(
constants.kTurretMotorPIDSlot,
constants.kTurretMotorPGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kI",
self.turretMotor.config_kI(
constants.kTurretMotorPIDSlot,
constants.kTurretMotorIGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kD",
self.turretMotor.config_kD(
constants.kTurretMotorPIDSlot,
constants.kTurretMotorDGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_Invert",
self.turretMotor.setInverted(constants.kTurretMotorInverted),
):
return
self.turretMotor.setNeutralMode(NeutralMode.Brake)
# SHOOTER
self.shootingMotor = createMotor(constants.kShootingMotorId)
print(f"Initializing Falcon: {constants.kShootingMotorName}")
if not ctreCheckError(
"configFactoryDefault",
self.shootingMotor.configFactoryDefault(
constants.kConfigurationTimeoutLimit
),
):
return
if not ctreCheckError(
"config_kP",
self.shootingMotor.config_kP(
constants.kShootingMotorPIDSlot,
constants.kShootingMotorPGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kI",
self.shootingMotor.config_kI(
constants.kShootingMotorPIDSlot,
constants.kShootingMotorIGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kD",
self.shootingMotor.config_kD(
constants.kShootingMotorPIDSlot,
constants.kShootingMotorDGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_Invert",
self.shootingMotor.setInverted(constants.kShootingMotorInverted),
):
return
# HOOD
self.hoodMotor = createMotor(constants.kHoodMotorId)
print(f"Initializing Falcon: {constants.kHoodMotorName}")
if not ctreCheckError(
"configFactoryDefault",
self.hoodMotor.configFactoryDefault(constants.kConfigurationTimeoutLimit),
):
return
if not ctreCheckError(
"config_kP",
self.hoodMotor.config_kP(
constants.kHoodMotorPIDSlot,
constants.kHoodMotorPGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kI",
self.hoodMotor.config_kI(
constants.kHoodMotorPIDSlot,
constants.kHoodMotorIGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kD",
self.hoodMotor.config_kD(
constants.kHoodMotorPIDSlot,
constants.kHoodMotorDGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_Invert",
self.hoodMotor.setInverted(constants.kHoodMotorInverted),
):
return
self.initializationMinimum = -1
self.initializationMaximum = 1
self.targetTurretAngle = Rotation2d()
self.targetHoodAngle = Rotation2d()
self.targetWheelSpeed = 0
def setAsStartingPosition(self) -> None:
self.hoodMotor.setSelectedSensorPosition(constants.kHoodStartingAngle)
if (
RobotBase.isReal()
): # only possible to calibrate the real robot, sim is perfect by default
turretRealPosition = map_range(
self.turretMotor.getSelectedSensorPosition(),
self.initializationMinimum,
self.initializationMaximum,
constants.kTurretMinimumAngle.radians()
* constants.kTalonEncoderPulsesPerRadian,
constants.kTurretMaximumAngle.radians()
* constants.kTalonEncoderPulsesPerRadian,
)
self.turretMotor.setSelectedSensorPosition(turretRealPosition)
if not ctreCheckError(
"brake_set", self.turretMotor.setNeutralMode(NeutralMode.Brake)
):
return
def periodic(self) -> None:
self.initializationMinimum = min(
self.initializationMinimum, self.turretMotor.getSelectedSensorPosition()
)
self.initializationMaximum = max(
self.initializationMaximum, self.turretMotor.getSelectedSensorPosition()
)
SmartDashboard.putNumber(
"mappedVal",
map_range(
self.turretMotor.getSelectedSensorPosition(),
self.initializationMinimum,
self.initializationMaximum,
constants.kTurretMinimumAngle.radians()
* constants.kTalonEncoderPulsesPerRadian,
constants.kTurretMaximumAngle.radians()
* constants.kTalonEncoderPulsesPerRadian,
),
)
if not SmartDashboard.getBoolean(constants.kShootingManualModeKey, True):
SmartDashboard.putNumber(
constants.kShootingWheelSpeedKey, self.getWheelSpeed()
)
SmartDashboard.putNumber(
constants.kShootingHoodAngleKey, self.getHoodAngle().degrees()
)
SmartDashboard.putNumber(
constants.kShootingTurretAngleKey, self.getTurretRotation().degrees()
)
SmartDashboard.putBoolean(
constants.kShootingFlywheelOnTargetKey, self.wheelOnTarget()
)
SmartDashboard.putBoolean(
constants.kShootingHoodOnTargetKey, self.hoodOnTarget()
)
SmartDashboard.putBoolean(
constants.kShootingTurretOnTargetKey, self.turretOnTarget()
)
def wheelOnTarget(self) -> bool:
return (
abs(self.getWheelSpeed() - self.targetWheelSpeed)
<= constants.kWheelSpeedTolerence
)
def hoodOnTarget(self) -> bool:
return (
abs((self.getHoodAngle() - self.targetHoodAngle).radians())
<= constants.kHoodAngleTolerence.radians()
)
def turretOnTarget(self) -> bool:
return (
abs((self.getTurretRotation() - self.targetTurretAngle).radians())
<= constants.kTurretAngleTolerence.radians()
)
def getWheelSpeed(self) -> int:
"""returns wheel speed in RPM"""
return (
self.shootingMotor.getSelectedSensorVelocity()
/ constants.kTalonVelocityPerRPM
)
def setWheelSpeed(self, speed: int) -> None:
self.targetWheelSpeed = speed
self.shootingMotor.set(
ControlMode.Velocity, speed * constants.kTalonVelocityPerRPM
)
def setHoodAngle(self, angle: Rotation2d) -> None:
"""angle to fire the ball with
absolute with 0 being straight and 90 degrees being direct to the sky"""
self.targetHoodAngle = angle
clampedAngle = min(
max(
angle.radians(),
(constants.kHoodMinimum + constants.kHoodSoftLimitBuffer).radians(),
),
(constants.kHoodMaximum - constants.kHoodSoftLimitBuffer).radians(),
)
encoderPulses = (
clampedAngle
* constants.kTalonEncoderPulsesPerRadian
/ constants.kHoodGearRatio
)
self.hoodMotor.set(ControlMode.Position, encoderPulses)
def getHoodAngle(self) -> Rotation2d:
return Rotation2d(
self.hoodMotor.getSelectedSensorPosition()
/ constants.kTalonEncoderPulsesPerRadian
* constants.kHoodGearRatio
)
def rotateTurret(self, angle: Rotation2d) -> None:
if (
angle.radians()
> constants.kTurretMaximumAngle.radians()
- constants.kTurretSoftLimitBuffer.radians()
or angle.radians()
< constants.kTurretMinimumAngle.radians()
+ constants.kTurretSoftLimitBuffer.radians()
):
return
self.targetTurretAngle = angle
encoderPulses = (
angle.radians()
* constants.kTalonEncoderPulsesPerRadian
/ constants.kTurretGearRatio
)
self.turretMotor.set(ControlMode.Position, encoderPulses)
def getTurretRotation(self) -> Rotation2d:
angle = Rotation2d(
(
self.turretMotor.getSelectedSensorPosition()
/ constants.kTalonEncoderPulsesPerRadian
)
* constants.kTurretGearRatio
)
return angle
def trackTurret(self, relativeAngle: float):
"""relativeAngle: radians"""
rotation = self.getTurretRotation() + Rotation2d(relativeAngle)
self.rotateTurret(
optimizeAngle(constants.kTurretRelativeForwardAngle, rotation)
)
```
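setAsStartingPosition() and periodic() above lean on map_range from util.convenientmath to rescale the recorded encoder sweep onto the turret's angle limits. A minimal sketch under the signature the calls imply (value, in_min, in_max, out_min, out_max); this is an assumption, not the repo's actual implementation:

```python
def map_range(value: float, in_min: float, in_max: float,
              out_min: float, out_max: float) -> float:
    """Linearly rescale value from [in_min, in_max] onto [out_min, out_max]."""
    return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min

# A reading halfway through the recorded sweep lands halfway between the
# turret's minimum and maximum encoder limits:
print(map_range(0.0, -1.0, 1.0, -2048.0, 2048.0))  # 0.0
```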
#### File: 2022-RapidReact/subsystems/visionsubsystem.py
```python
import typing
from commands2 import SubsystemBase
from networktables import NetworkTables
from wpilib import SmartDashboard
from wpimath.geometry import Pose2d, Rotation2d, Transform2d
import constants
from util import convenientmath
class TrackingModule:
def __init__(self, name: str) -> None:
self.name = name
def getTargetAngle(self) -> Rotation2d:
raise NotImplementedError("Must be implemented by subclass")
def getTargetDistance(self) -> float:
raise NotImplementedError("Must be implemented by subclass")
def getTargetFacingAngle(self) -> Rotation2d:
raise NotImplementedError("Must be implemented by subclass")
def update(self) -> None:
targetAngle = self.getTargetAngle()
if targetAngle is not None:
SmartDashboard.putNumber(
constants.kTargetAngleRelativeToRobotKeys.valueKey,
convenientmath.normalizeRotation(targetAngle).radians(),
)
SmartDashboard.putBoolean(
constants.kTargetAngleRelativeToRobotKeys.validKey, True
)
else:
SmartDashboard.putBoolean(
constants.kTargetAngleRelativeToRobotKeys.validKey, False
)
targetDistance = self.getTargetDistance()
if targetDistance is not None:
SmartDashboard.putNumber(
constants.kTargetDistanceRelativeToRobotKeys.valueKey, targetDistance
)
SmartDashboard.putBoolean(
constants.kTargetDistanceRelativeToRobotKeys.validKey, True
)
else:
SmartDashboard.putBoolean(
constants.kTargetDistanceRelativeToRobotKeys.validKey, False
)
targetFacingAngle = self.getTargetFacingAngle()
if targetFacingAngle is not None:
SmartDashboard.putNumber(
constants.kTargetFacingAngleRelativeToRobotKeys.valueKey,
convenientmath.normalizeRotation(targetFacingAngle).radians(),
)
SmartDashboard.putBoolean(
constants.kTargetFacingAngleRelativeToRobotKeys.validKey, True
)
else:
SmartDashboard.putBoolean(
constants.kTargetFacingAngleRelativeToRobotKeys.validKey, False
)
def reset(self) -> None:
raise NotImplementedError("Must be implemented by subclass")
class SimTrackingModule(TrackingModule):
"""
Implementation of TrackingModule designed for ease of simulation:
Uses externally provided field-relative target pose to calculate simulated tracking data
"""
def __init__(
self,
name: str,
targetPoseArrayKey: str,
) -> None:
TrackingModule.__init__(self, name)
self.targetPoseArrayKey = targetPoseArrayKey
self.targetAngle = Rotation2d()
self.targetDistance = 0
self.targetFacingAngle = Rotation2d()
def getTargetAngle(self) -> typing.Optional[Rotation2d]:
return self.targetAngle
def getTargetDistance(self) -> typing.Optional[float]:
return self.targetDistance
def getTargetFacingAngle(self) -> typing.Optional[Rotation2d]:
return self.targetFacingAngle
def update(self) -> None:
targetPoseX, targetPoseY, targetAngle = SmartDashboard.getNumberArray(
constants.kSimTargetPoseArrayKey, [0, 0, 0]
)
targetPose = Pose2d(targetPoseX, targetPoseY, targetAngle)
robotPoseX, robotPoseY, robotPoseAngle = SmartDashboard.getNumberArray(
constants.kSimRobotPoseArrayKey, [0, 0, 0]
)
robotPose = Pose2d(robotPoseX, robotPoseY, robotPoseAngle)
robotToTarget = Transform2d(robotPose, targetPose)
self.targetAngle = Rotation2d(robotToTarget.X(), robotToTarget.Y())
self.targetDistance = robotToTarget.translation().norm()
self.targetFacingAngle = robotToTarget.rotation()
TrackingModule.update(self)
def reset(self) -> None:
pass
class LimelightTrackingModule(TrackingModule):
"""
Implementation of TrackingModule designed for use with the Limelight smart-camera
"""
def __init__(
self,
name: str,
) -> None:
TrackingModule.__init__(self, name)
self.targetAngle = None
self.targetDistance = None
self.targetFacingAngle = None
NetworkTables.initialize()
self.limelightNetworkTable = NetworkTables.getTable(
constants.kLimelightNetworkTableName
)
def getTargetAngle(self) -> typing.Optional[Rotation2d]:
return self.targetAngle
def getTargetDistance(self) -> typing.Optional[float]:
return self.targetDistance
def getTargetFacingAngle(self) -> typing.Optional[Rotation2d]:
return self.targetFacingAngle
def update(self) -> None:
targetValid = self.limelightNetworkTable.getNumber(
constants.kLimelightTargetValidKey, constants.kLimelightTargetInvalidValue
)
if targetValid:
# real model has limelight rotated 90 degrees
self.targetAngle = Rotation2d.fromDegrees(
self.limelightNetworkTable.getNumber(
constants.kLimelightTargetVerticalAngleKey, 0.0
)
)
self.targetDistance = (
constants.kSimDefaultTargetHeight - constants.kLimelightVerticalOffset
) / (
Rotation2d.fromDegrees(
self.limelightNetworkTable.getNumber(
constants.kLimelightTargetHorizontalAngleKey, 0.0
)
)
+ constants.kLimelightVerticalAngleOffset
).tan()
else:
self.targetDistance = None
self.targetAngle = None
TrackingModule.update(self)
def reset(self) -> None:
pass
class VisionSubsystem(SubsystemBase):
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.setName(__class__.__name__)
self.trackingModule = LimelightTrackingModule(
constants.kLimelightTrackerModuleName
)
def resetTrackingModule(self):
self.trackingModule.reset()
def periodic(self):
"""
Called periodically by the command framework. Updates the estimate of the target's pose from the tracking data.
"""
self.trackingModule.update()
targetAngle = self.trackingModule.getTargetAngle()
if targetAngle is None:
SmartDashboard.putBoolean(constants.kTargetPoseArrayKeys.validKey, False)
SmartDashboard.putBoolean(
constants.kRobotVisionPoseArrayKeys.validKey, False
)
else:
targetDistance = self.trackingModule.getTargetDistance()
if targetDistance is None:
targetDistance = float("inf")
targetDistance += constants.kSimTargetUpperHubRadius
targetFacingAngle = self.trackingModule.getTargetFacingAngle()
if targetFacingAngle is None:
targetFacingAngle = Rotation2d()
robotPoseX, robotPoseY, robotPoseAngle = SmartDashboard.getNumberArray(
constants.kRobotPoseArrayKeys.valueKey, [0, 0, 0]
)
turretRotation = (
SmartDashboard.getNumber(constants.kShootingTurretAngleKey, 0)
* constants.kRadiansPerDegree
)
robotPose = Pose2d(robotPoseX, robotPoseY, robotPoseAngle + turretRotation)
robotToTarget = Transform2d(
convenientmath.translationFromDistanceAndRotation(
targetDistance, targetAngle
),
targetFacingAngle,
)
targetPose = robotPose + robotToTarget
SmartDashboard.putNumberArray(
constants.kTargetPoseArrayKeys.valueKey,
[targetPose.X(), targetPose.Y(), targetPose.rotation().radians()],
)
SmartDashboard.putBoolean(constants.kTargetPoseArrayKeys.validKey, True)
# calculate position on field based purely on vision data
netRotation = Rotation2d(
robotPoseAngle + turretRotation + targetAngle.radians()
)
horizontalOffset = netRotation.cos() * (targetDistance)
verticalOffset = netRotation.sin() * (targetDistance)
robotEstimatedPose = Pose2d(
constants.kSimDefaultTargetLocation.X() + horizontalOffset,
constants.kSimDefaultTargetLocation.Y() + verticalOffset,
robotPoseAngle,
)
SmartDashboard.putNumberArray(
constants.kRobotVisionPoseArrayKeys.valueKey,
[
robotEstimatedPose.X(),
robotEstimatedPose.Y(),
robotEstimatedPose.rotation().radians(),
],
)
SmartDashboard.putBoolean(
constants.kRobotVisionPoseArrayKeys.validKey, True
)
```
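LimelightTrackingModule.update() above estimates distance with the usual fixed-camera formula: (target height - camera height) / tan(camera tilt + reported vertical angle). A worked example with made-up numbers (the real values live in constants.py, which is not shown):

```python
from math import tan, radians

targetHeight = 2.64           # hypothetical target tape height, meters
cameraHeight = 0.80           # hypothetical Limelight lens height, meters
cameraTilt = radians(30)      # hypothetical mounting angle above horizontal
reportedAngle = radians(10)   # target seen 10 degrees above the crosshair

distance = (targetHeight - cameraHeight) / tan(cameraTilt + reportedAngle)
print(round(distance, 2))  # ~2.19 m along the floor from camera to target
```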
{
"source": "1757WestwoodRobotics/mentorbot",
"score": 3
}
#### File: mentorbot/commands/rotatecamera.py
```python
import typing
from commands2 import CommandBase
from subsystems.cameracontroller import CameraSubsystem
class RotateCamera(CommandBase):
def __init__(self, camera: CameraSubsystem,
leftRight: typing.Callable[[], float],
upDown: typing.Callable[[], float]) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.camera = camera
self.leftRight = leftRight
self.upDown = upDown
self.addRequirements([self.camera])
def execute(self) -> None:
self.camera.setCameraRotation(self.leftRight(), self.upDown())
```
#### File: mentorbot/commands/setcannon.py
```python
from enum import Enum, auto
from subsystems.cannonsubsystem import CannonSubsystem
from commands2 import CommandBase
class SetCannon(CommandBase):
class Mode(Enum):
Off = auto()
Fill = auto()
Launch = auto()
def __init__(self, cannon: CannonSubsystem, mode: Mode) -> None:
CommandBase.__init__(self)
self.setName(__class__.__name__)
self.cannon = cannon
self.mode = mode
self.addRequirements([self.cannon])
self.funcs = {
SetCannon.Mode.Off: self.cannon.close,
SetCannon.Mode.Fill: self.cannon.fill,
SetCannon.Mode.Launch: self.cannon.launch
}
self.isFinished = lambda: True
def execute(self) -> None:
self.funcs[self.mode]()
```
#### File: mentorbot/commands/varyoutput.py
```python
from subsystems.lightsubsystem import LightSubsystem
import typing
from commands2 import CommandBase
class RelayControl(CommandBase):
def __init__(self, controller: LightSubsystem,
controlPercent: typing.Callable[[], float]) -> None:
CommandBase.__init__(self)
self.control = controller
self.controlPercentCommand = controlPercent
self.setOutputPercent = lambda percent: self.control.light.set(percent)
self.addRequirements([self.control])
self.setName(__class__.__name__)
def execute(self) -> None:
self.setOutputPercent(self.controlPercentCommand())
def end(self, interrupted: bool) -> None:
self.setOutputPercent(0.0)
```
#### File: mentorbot/subsystems/cannonsubsystem.py
```python
from commands2 import SubsystemBase
from ctre import WPI_VictorSPX
from wpilib import Solenoid
from wpilib import AnalogInput
import constants
def map(pressureinput: float, voltmin: float, voltmax: float,
pressuremin: float, pressuremax: float) -> float:
"""Linearly rescale a reading from one range onto another (note: shadows the builtin map)."""
return (pressureinput - pressuremin) * (
(voltmax - voltmin) / (pressuremax - pressuremin)) + voltmin
class CannonSubsystem(SubsystemBase):
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.launchSolonoid = WPI_VictorSPX(
constants.kCannonLaunchVictorDeviceID)
self.fillSolonoid = Solenoid(constants.kPCMCannonCanID,
constants.kCannonFillPCMID)
self.pressure = AnalogInput(constants.kCannonPressureAnalogInput)
self.fillSolonoid.set(False)
self.launchSolonoid.set(0.0)
def close(self) -> None:
"""close all the solonoids"""
self.fillSolonoid.set(False)
self.launchSolonoid.set(0.0)
print(
map(self.pressure.getVoltage(), constants.kVoltageOutMin,
constants.kVoltageOutMax, constants.kPressureInMin,
constants.kPressureInMax))
# print("CLOSING")
def fill(self) -> None:
"""begins filling staging tank"""
# self.launchSolonoid.set(
# Relay.Value.kOff
# ) # ensure air doesn't just flow out the end without being stored
self.launchSolonoid.set(0.0)
self.fillSolonoid.set(True)
print(self.fillSolonoid.get())
print("FILLING")
def launch(self) -> None:
self.fillSolonoid.set(False)
self.launchSolonoid.set(1.0)
print(self.launchSolonoid.get())
```
#### File: mentorbot/subsystems/drivesubsystem.py
```python
from math import cos, sin
from commands2 import SubsystemBase
from wpilib import Encoder, PWMVictorSPX, RobotBase, Timer
from ctre import (
AbsoluteSensorRange,
CANCoder,
ControlMode,
ErrorCode,
SensorInitializationStrategy,
WPI_TalonFX,
)
from navx import AHRS
from wpimath.geometry import Rotation2d, Pose2d
from wpimath.kinematics import (
ChassisSpeeds,
SwerveModuleState,
SwerveDrive4Kinematics,
SwerveDrive4Odometry,
)
from enum import Enum, auto
from typing import Tuple
import constants
class SwerveModule:
def __init__(self, name: str) -> None:
self.name = name
def getSwerveAngle(self) -> Rotation2d:
raise NotImplementedError("Must be implemented by subclass")
def setSwerveAngle(self, swerveAngle: Rotation2d) -> None:
raise NotImplementedError("Must be implemented by subclass")
def setSwerveAngleTarget(self, swerveAngleTarget: Rotation2d) -> None:
raise NotImplementedError("Must be implemented by subclass")
def getWheelLinearVelocity(self) -> float:
raise NotImplementedError("Must be implemented by subclass")
def setWheelLinearVelocityTarget(self,
wheelLinearVelocityTarget: float) -> None:
raise NotImplementedError("Must be implemented by subclass")
def reset(self) -> None:
raise NotImplementedError("Must be implemented by subclass")
def getState(self) -> SwerveModuleState:
return SwerveModuleState(
self.getWheelLinearVelocity(),
self.getSwerveAngle(),
)
def applyState(self, state: SwerveModuleState) -> None:
optimizedState = SwerveModuleState.optimize(state,
self.getSwerveAngle())
# optimizedState = state
self.setWheelLinearVelocityTarget(optimizedState.speed)
self.setSwerveAngleTarget(optimizedState.angle)
class PWMSwerveModule(SwerveModule):
"""
Implementation of SwerveModule designed for ease of simulation:
wheelMotor: 1:1 gearing with wheel
swerveMotor: 1:1 gearing with swerve
wheelEncoder: wheel distance (meters)
swerveEncoder: swerve angle (radians)
"""
def __init__(
self,
name: str,
wheelMotor: PWMVictorSPX,
swerveMotor: PWMVictorSPX,
wheelEncoder: Encoder,
swerveEncoder: Encoder,
) -> None:
SwerveModule.__init__(self, name)
self.wheelMotor = wheelMotor
self.swerveMotor = swerveMotor
self.wheelEncoder = wheelEncoder
self.swerveEncoder = swerveEncoder
self.wheelEncoder.setDistancePerPulse(
1 / constants.kWheelEncoderPulsesPerMeter)
self.swerveEncoder.setDistancePerPulse(
1 / constants.kSwerveEncoderPulsesPerRadian)
def getSwerveAngle(self) -> Rotation2d:
return Rotation2d(self.swerveEncoder.getDistance())
def setSwerveAngleTarget(self, swerveAngleTarget: Rotation2d) -> None:
swerveError = swerveAngleTarget.radians(
) - self.swerveEncoder.getDistance()
swerveErrorClamped = min(max(swerveError, -1), 1)
self.swerveMotor.setSpeed(swerveErrorClamped)
def getWheelLinearVelocity(self) -> float:
return self.wheelEncoder.getRate()
def setWheelLinearVelocityTarget(self,
wheelLinearVelocityTarget: float) -> None:
speedFactor = wheelLinearVelocityTarget / constants.kMaxWheelLinearVelocity
speedFactorClamped = min(max(speedFactor, -1), 1)
self.wheelMotor.setSpeed(speedFactorClamped)
def reset(self) -> None:
pass
class CTRESwerveModule(SwerveModule):
"""
Implementation of SwerveModule for the SDS swerve modules
https://www.swervedrivespecialties.com/collections/kits/products/mk4-swerve-module
driveMotor: Falcon 500 Motor (with built-in encoder) attached to wheel through gearing
steerMotor: Falcon 500 Motor (with built-in encoder) attached to swerve through gearing
swerveEncoder: CANCoder
"""
def __init__(
self,
name: str,
driveMotor: WPI_TalonFX,
driveMotorInverted: bool,
steerMotor: WPI_TalonFX,
steerMotorInverted: bool,
swerveEncoder: CANCoder,
swerveEncoderOffset: float,
) -> None:
SwerveModule.__init__(self, name)
self.driveMotor = driveMotor
self.driveMotorInverted = driveMotorInverted
self.steerMotor = steerMotor
self.steerMotorInverted = steerMotorInverted
self.swerveEncoder = swerveEncoder
self.swerveEncoderOffset = swerveEncoderOffset
def ctreCheckError(name: str, errorCode: ErrorCode) -> bool:
if (errorCode is not None) and (errorCode != ErrorCode.OK):
print("ERROR: {}: {}".format(name, errorCode))
return False
return True
print("Initializing swerve module: {}".format(self.name))
print(" Configuring swerve encoder: CAN ID: {}".format(
self.swerveEncoder.getDeviceNumber()))
if not ctreCheckError(
"configFactoryDefault",
self.swerveEncoder.configFactoryDefault(
constants.kConfigurationTimeoutLimit),
):
return
if not ctreCheckError(
"configSensorInitializationStrategy",
self.swerveEncoder.configSensorInitializationStrategy(
SensorInitializationStrategy.BootToAbsolutePosition,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"configMagnetOffset",
self.swerveEncoder.configMagnetOffset(
-1 * self.
swerveEncoderOffset, # invert the offset to zero the encoder
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"configAbsoluteSensorRange",
self.swerveEncoder.configAbsoluteSensorRange(
AbsoluteSensorRange.Signed_PlusMinus180,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"setPositionToAbsolute",
self.swerveEncoder.setPositionToAbsolute(
constants.kConfigurationTimeoutLimit, ),
):
return
print(" ... Done")
print(" Configuring drive motor: CAN ID: {}".format(
self.driveMotor.getDeviceID()))
if not ctreCheckError(
"configFactoryDefault",
self.driveMotor.configFactoryDefault(
constants.kConfigurationTimeoutLimit),
):
return
# config = TalonFXConfiguration()
# if not ctreCheckError(
# "getAllConfigs",
# self.driveMotor.getAllConfigs(config, constants.kConfigurationTimeoutLimit),
# ):
# return
# else:
# print(" Config:\n{}".format(config.toString()))
self.driveMotor.setInverted(self.driveMotorInverted)
if not ctreCheckError(
"config_kP",
self.driveMotor.config_kP(
constants.kDrivePIDSlot,
constants.kDrivePGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kI",
self.driveMotor.config_kI(
constants.kDrivePIDSlot,
constants.kDriveIGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kD",
self.driveMotor.config_kD(
constants.kDrivePIDSlot,
constants.kDriveDGain,
constants.kConfigurationTimeoutLimit,
),
):
return
print(" ... Done")
print(" Configuring steer motor: CAN ID: {}".format(
self.steerMotor.getDeviceID()))
if not ctreCheckError(
"configFactoryDefault",
self.steerMotor.configFactoryDefault(
constants.kConfigurationTimeoutLimit),
):
return
# config = TalonFXConfiguration()
# if not ctreCheckError(
# "getAllConfigs",
# self.driveMotor.getAllConfigs(config, constants.kConfigurationTimeoutLimit),
# ):
# return
# else:
# print(" Config:\n{}".format(config.toString()))
self.steerMotor.setInverted(self.steerMotorInverted)
if not ctreCheckError(
"config_kP",
self.steerMotor.config_kP(
constants.kSteerPIDSlot,
constants.kSteerPGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kI",
self.steerMotor.config_kI(
constants.kSteerPIDSlot,
constants.kSteerIGain,
constants.kConfigurationTimeoutLimit,
),
):
return
if not ctreCheckError(
"config_kD",
self.steerMotor.config_kD(
constants.kSteerPIDSlot,
constants.kSteerDGain,
constants.kConfigurationTimeoutLimit,
),
):
return
print(" ... Done")
print("... Done")
def getSwerveAngle(self) -> Rotation2d:
steerEncoderPulses = self.steerMotor.getSelectedSensorPosition()
swerveAngle = steerEncoderPulses / constants.kSwerveEncoderPulsesPerRadian
# print("Steer[{}]: {}".format(self.steerMotor.getDeviceID(), swerveAngle))
return Rotation2d(swerveAngle)
def setSwerveAngle(self, swerveAngle: Rotation2d) -> None:
steerEncoderPulses = (swerveAngle.radians() *
constants.kSwerveEncoderPulsesPerRadian)
self.steerMotor.setSelectedSensorPosition(steerEncoderPulses)
def setSwerveAngleTarget(self, swerveAngleTarget: Rotation2d) -> None:
steerEncoderPulsesTarget = (swerveAngleTarget.radians() *
constants.kSwerveEncoderPulsesPerRadian)
self.steerMotor.set(ControlMode.Position, steerEncoderPulsesTarget)
def getWheelLinearVelocity(self) -> float:
driveEncoderPulsesPerSecond = (
self.driveMotor.getSelectedSensorVelocity() *
constants.k100MillisecondsPerSecond)
wheelLinearVelocity = (driveEncoderPulsesPerSecond /
constants.kWheelEncoderPulsesPerMeter)
return wheelLinearVelocity
def setWheelLinearVelocityTarget(self,
wheelLinearVelocityTarget: float) -> None:
driveEncoderPulsesPerSecond = (wheelLinearVelocityTarget *
constants.kWheelEncoderPulsesPerMeter)
self.driveMotor.set(
ControlMode.Velocity,
driveEncoderPulsesPerSecond / constants.k100MillisecondsPerSecond,
)
def reset(self) -> None:
swerveEncoderAngle = (self.swerveEncoder.getAbsolutePosition() *
constants.kRadiansPerDegree)
self.setSwerveAngle(Rotation2d(swerveEncoderAngle))
class DriveSubsystem(SubsystemBase):
class CoordinateMode(Enum):
RobotRelative = auto()
FieldRelative = auto()
def __init__(self) -> None:
SubsystemBase.__init__(self)
self.setName(__class__.__name__)
if RobotBase.isReal():
self.frontLeftModule = CTRESwerveModule(
constants.kFrontLeftModuleName,
WPI_TalonFX(constants.kFrontLeftDriveMotorId),
constants.kFrontLeftDriveInverted,
WPI_TalonFX(constants.kFrontLeftSteerMotorId),
constants.kFrontLeftSteerInverted,
CANCoder(constants.kFrontLeftSteerEncoderId),
constants.kFrontLeftAbsoluteEncoderOffset,
)
self.frontRightModule = CTRESwerveModule(
constants.kFrontRightModuleName,
WPI_TalonFX(constants.kFrontRightDriveMotorId),
constants.kFrontRightDriveInverted,
WPI_TalonFX(constants.kFrontRightSteerMotorId),
constants.kFrontRightSteerInverted,
CANCoder(constants.kFrontRightSteerEncoderId),
constants.kFrontRightAbsoluteEncoderOffset,
)
self.backLeftModule = CTRESwerveModule(
constants.kBackLeftModuleName,
WPI_TalonFX(constants.kBackLeftDriveMotorId),
constants.kBackLeftDriveInverted,
WPI_TalonFX(constants.kBackLeftSteerMotorId),
constants.kBackLeftSteerInverted,
CANCoder(constants.kBackLeftSteerEncoderId),
constants.kBackLeftAbsoluteEncoderOffset,
)
self.backRightModule = CTRESwerveModule(
constants.kBackRightModuleName,
WPI_TalonFX(constants.kBackRightDriveMotorId),
constants.kBackRightDriveInverted,
WPI_TalonFX(constants.kBackRightSteerMotorId),
constants.kBackRightSteerInverted,
CANCoder(constants.kBackRightSteerEncoderId),
constants.kBackRightAbsoluteEncoderOffset,
)
else:
self.frontLeftModule = PWMSwerveModule(
constants.kFrontLeftModuleName,
PWMVictorSPX(constants.kFrontLeftDriveMotorSimPort),
PWMVictorSPX(constants.kFrontLeftSteerMotorSimPort),
Encoder(*constants.kFrontLeftDriveEncoderSimPorts),
Encoder(*constants.kFrontLeftSteerEncoderSimPorts),
)
self.frontRightModule = PWMSwerveModule(
constants.kFrontRightModuleName,
PWMVictorSPX(constants.kFrontRightDriveMotorSimPort),
PWMVictorSPX(constants.kFrontRightSteerMotorSimPort),
Encoder(*constants.kFrontRightDriveEncoderSimPorts),
Encoder(*constants.kFrontRightSteerEncoderSimPorts),
)
self.backLeftModule = PWMSwerveModule(
constants.kBackLeftModuleName,
PWMVictorSPX(constants.kBackLeftDriveMotorSimPort),
PWMVictorSPX(constants.kBackLeftSteerMotorSimPort),
Encoder(*constants.kBackLeftDriveEncoderSimPorts),
Encoder(*constants.kBackLeftSteerEncoderSimPorts),
)
self.backRightModule = PWMSwerveModule(
constants.kBackRightModuleName,
PWMVictorSPX(constants.kBackRightDriveMotorSimPort),
PWMVictorSPX(constants.kBackRightSteerMotorSimPort),
Encoder(*constants.kBackRightDriveEncoderSimPorts),
Encoder(*constants.kBackRightSteerEncoderSimPorts),
)
self.modules = (
self.frontLeftModule,
self.frontRightModule,
self.backLeftModule,
self.backRightModule,
)
self.kinematics = SwerveDrive4Kinematics(
constants.kFrontLeftWheelPosition,
constants.kFrontRightWheelPosition,
constants.kBackLeftWheelPosition,
constants.kBackRightWheelPosition,
)
# Create the gyro, a sensor which can indicate the heading of the robot relative
# to a customizable position.
self.gyro = AHRS.create_spi()
# Create an object for our odometry, which will utilize sensor data to
# keep a record of our position on the field.
self.odometry = SwerveDrive4Odometry(self.kinematics,
self.gyro.getRotation2d())
self.printTimer = Timer()
# self.printTimer.start()
self.returnPos = Pose2d(0, 0, 0)
def resetSwerveModules(self):
for module in self.modules:
module.reset()
self.odometry.resetPosition(Pose2d(), self.gyro.getRotation2d())
def periodic(self):
"""
Called periodically when it can be called. Updates the robot's
odometry with sensor data.
"""
self.odometry.update(
self.gyro.getRotation2d(),
self.frontLeftModule.getState(),
self.frontRightModule.getState(),
self.backLeftModule.getState(),
self.backRightModule.getState(),
)
if self.printTimer.hasPeriodPassed(constants.kPrintPeriod):
rX = self.odometry.getPose().translation().X()
rY = self.odometry.getPose().translation().Y()
rAngle = int(self.odometry.getPose().rotation().degrees())
flAngle = int(self.frontLeftModule.getSwerveAngle().degrees())
frAngle = int(self.frontRightModule.getSwerveAngle().degrees())
blAngle = int(self.backLeftModule.getSwerveAngle().degrees())
brAngle = int(self.backRightModule.getSwerveAngle().degrees())
flSpeed = self.frontLeftModule.getWheelLinearVelocity()
frSpeed = self.frontRightModule.getWheelLinearVelocity()
blSpeed = self.backLeftModule.getWheelLinearVelocity()
brSpeed = self.backRightModule.getWheelLinearVelocity()
print(
"r: {:.1f}, {:.1f}, {}* fl: {}* {:.1f} fr: {}* {:.1f} bl: {}* {:.1f} br: {}* {:.1f}"
.format(
rX,
rY,
rAngle,
flAngle,
flSpeed,
frAngle,
frSpeed,
blAngle,
blSpeed,
brAngle,
brSpeed,
))
def arcadeDriveWithFactors(
self,
forwardSpeedFactor: float,
sidewaysSpeedFactor: float,
rotationSpeedFactor: float,
coordinateMode: CoordinateMode,
) -> None:
"""
Drives the robot using arcade controls.
:param forwardSpeedFactor: the commanded forward movement
:param sidewaysSpeedFactor: the commanded sideways movement
:param rotationSpeedFactor: the commanded rotation
"""
# print(
# "inputs: x: {:.2f} y: {:.2f} *: {:.2f}".format(
# forwardSpeedFactor, sidewaysSpeedFactor, rotationSpeedFactor
# )
# )
chassisSpeeds = ChassisSpeeds(
forwardSpeedFactor * constants.kMaxForwardLinearVelocity,
sidewaysSpeedFactor * constants.kMaxSidewaysLinearVelocity,
rotationSpeedFactor * constants.kMaxRotationAngularVelocity,
)
self.arcadeDriveWithSpeeds(chassisSpeeds, coordinateMode)
def arcadeDriveWithSpeeds(self, chassisSpeeds: ChassisSpeeds,
coordinateMode: CoordinateMode) -> None:
robotChassisSpeeds = None
if coordinateMode is DriveSubsystem.CoordinateMode.RobotRelative:
robotChassisSpeeds = chassisSpeeds
elif coordinateMode is DriveSubsystem.CoordinateMode.FieldRelative:
robotChassisSpeeds = ChassisSpeeds.fromFieldRelativeSpeeds(
chassisSpeeds.vx,
chassisSpeeds.vy,
chassisSpeeds.omega,
self.odometry.getPose().rotation(),
)
moduleStates = self.kinematics.toSwerveModuleStates(robotChassisSpeeds)
(
frontLeftState,
frontRightState,
backLeftState,
backRightState,
) = SwerveDrive4Kinematics.normalizeWheelSpeeds(
moduleStates, constants.kMaxWheelLinearVelocity)
self.frontLeftModule.applyState(frontLeftState)
self.frontRightModule.applyState(frontRightState)
self.backLeftModule.applyState(backLeftState)
self.backRightModule.applyState(backRightState)
def rotatePoint(self, x: float, y: float, angle: float,
ccw: bool) -> Tuple[float, float]:
# Rotate (x, y) by angle; a clockwise rotation flips the sign of the sine terms,
# which is equivalent to rotating by -angle.
sign = 1 if ccw else -1
xNew = x * cos(angle) - y * sin(angle) * sign
yNew = x * sin(angle) * sign + y * cos(angle)
return xNew, yNew
def shiftPoint(self, savedPose: Pose2d, currentPose: Pose2d) -> Pose2d:
xDelta = savedPose.X() - currentPose.X()
yDelta = savedPose.Y() - currentPose.Y()
coords = self.rotatePoint(xDelta, yDelta,
currentPose.rotation().radians() * -1, True)
return Pose2d(coords[0], coords[1], 0)
``` |
{
"source": "1759749908/gzhu_no_clock_in",
"score": 3
} |
#### File: 1759749908/gzhu_no_clock_in/login.py
```python
import os
import re
import pickle
import msession
from ocr import ocr
def login(username: str, password: str):
session = msession.session
session.cookies.clear()
res = session.get(msession.urls.cas, verify=False)
lt = re.findall(r'name="lt" value="(.*)"', res.text)
captcha_url = msession.urls.captcha
captcha_path = 'captcha.jpg'
with session.get(captcha_url) as captcha:
with open(captcha_path, mode='wb') as captcha_jpg:
captcha_jpg.write(captcha.content)
captcha = ocr(captcha_path)
login_form = {
'username': username,
'password': password,
'captcha': captcha,
'warn': 'true',
'lt': lt[0],
'execution': 'e1s1',
'_eventId': 'submit',
'submit': '登录'
}
post_res = session.post(msession.urls.cas, data=login_form)
if '账号或密码错误' in post_res.text:
print ('账号或密码错误')
return
if '验证码不正确' in post_res.text:
print ('验证码不正确')
return
os.remove('captcha.jpg')
session.get(msession.urls.sso, verify=False)
cookies = session.cookies
if not os.path.exists('cookies'):
os.mkdir('cookies')
if not cookies:
print ('No cookies!')
else:
file_name = 'cookies' + os.sep + username
with open(file_name, mode='wb') as cookies_file:
pickle.dump(session.cookies, cookies_file)
```
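`login()` persists the session cookies with `pickle`; a later check-in run is expected to load them back. Below is a minimal sketch (not part of the repo) of restoring that cookie jar into a fresh `requests` session; the helper name and its usage are assumptions.
```python
import os
import pickle

import requests


def load_session(username: str) -> requests.Session:
    """Rebuild a session from the cookie jar that login() pickled to cookies/<username>."""
    session = requests.Session()
    cookie_path = 'cookies' + os.sep + username
    with open(cookie_path, mode='rb') as cookies_file:
        # login() dumped session.cookies, so the unpickled object is a RequestsCookieJar
        session.cookies = pickle.load(cookies_file)
    return session
```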
#### File: 1759749908/gzhu_no_clock_in/ocr.py
```python
from PIL import Image
import pytesseract
def ocr(path):
img = Image.open(path).convert('L')
table = []
threshold= 150
for i in range(256):
table.append(1 if i > threshold else 0)
img_bin = img.point(table, '1')
text = pytesseract.image_to_string(img_bin, config='-c tessedit_char_whitelist=0123456789').strip()
#print (text)
return text
``` |
{
"source": "17605272633/ETMS",
"score": 2
} |
#### File: apps/attendance/models.py
```python
from django.db import models
from lesson.models import lesson_table
from users.models import student_table, teacher_table
class student_attendance_table(models.Model):
"""学生考勤表"""
STATUS_CHOICES = (
(1, "全勤"),
(2, "迟到"),
(3, "早退"),
(4, "缺勤"),
(5, "请假"),
)
# asid = models.IntegerField(primary_key=True, verbose_name="考勤id")
alesson = models.ForeignKey(lesson_table, on_delete=models.CASCADE, verbose_name="课程")
astime = models.DateTimeField(auto_now_add=True, verbose_name="日期")
asuser = models.ForeignKey(student_table, on_delete=models.CASCADE, verbose_name="学生")
asstatus = models.SmallIntegerField(choices=STATUS_CHOICES, default=1, verbose_name="考勤信息")
class Meta:
db_table = "t_s_attendance"
verbose_name = "学生考勤"
def __str__(self):
"""定义每个数据对象的显示信息"""
return str(self.astime)
class teacher_attendance_table(models.Model):
"""教师考勤表"""
STATUS_CHOICES = (
(1, "全勤"),
(2, "迟到"),
(3, "早退"),
(4, "缺勤"),
(5, "请假"),
)
# atid = models.IntegerField(primary_key=True, verbose_name="考勤id")
alesson = models.ForeignKey(lesson_table, on_delete=models.CASCADE, verbose_name="课程")
attime = models.DateTimeField(auto_now_add=True, verbose_name="日期")
atuser = models.ForeignKey(teacher_table, on_delete=models.CASCADE, verbose_name="教师")
atstatus = models.SmallIntegerField(choices=STATUS_CHOICES, default=1, verbose_name="考勤信息")
class Meta:
db_table = "t_t_attendance"
verbose_name = "教师考勤"
def __str__(self):
"""定义每个数据对象的显示信息"""
return str(self.attime)
```
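Both attendance tables share the same `STATUS_CHOICES`, so Django's generated `get_<field>_display()` helpers apply. A small query sketch; the lesson id is a placeholder and the function name is not from the repo.
```python
from attendance.models import student_attendance_table


def absent_students(lesson_id):
    """Names of students marked absent (status 4) for one lesson."""
    records = student_attendance_table.objects.filter(alesson_id=lesson_id, asstatus=4)
    # get_asstatus_display() maps the stored integer back to its label in STATUS_CHOICES
    return [(record.asuser.sname, record.get_asstatus_display()) for record in records]
```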
#### File: apps/lesson/views.py
```python
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from lesson.serializers import LessonSerializer
from users.models import student_table
from users.serializers import StudentSerializer
from .models import lesson_table
class LessonSelectView(GenericAPIView):
"""
获取课程数据视图
路由:/lesson/lesson/
"""
def get(self, request):
"""查询所有"""
try:
lesson = lesson_table.objects.all()
except:
return Response({"error": "查询失败"})
# 序列化
lesson_serializer = LessonSerializer(lesson, many=True)
lesson_dict = lesson_serializer.data
# 返回
return Response(lesson_dict)
def post(self, request):
"""
按条件获取课程数据
请求参数: user_id = ? , userkind = ?
返回json: [
{
"lid": 809010001,
"lname": "C语言程序设计01",
"ltime": 12,
"lclass_id": 1530501,
"lclassroom_id": 101,
"lteacher_id": 80901001
},
...
]
"""
# 获取请求数据
querydict = request.data
userkind = querydict.getlist("userkind")[0]
user_id = querydict.getlist("user_id")[0]
# 教师
if userkind == "1":
# 按照教师id获取课程数据
condition_lesson = lesson_table.objects.filter(lteacher_id=user_id)
condition_serializer = LessonSerializer(condition_lesson, many=True)
return Response(condition_serializer.data)
# 学生
if userkind == "2":
# 获取该学生班号
stu_querydict = student_table.objects.filter(sid=user_id)
stu_info = StudentSerializer(stu_querydict, many=True).data
print(stu_info)
class_id = stu_info[0]["sclass"]
# 按照班级id获取课程数据
condition_lesson = lesson_table.objects.filter(lclass_id=class_id)
condition_serializer = LessonSerializer(condition_lesson, many=True)
return Response(condition_serializer.data)
class LessonApplyView(APIView):
"""
实验课程安排视图
路由:/lesson/lessonapply/
请求参数:
"""
def post(self, request):
"""创建实验课程"""
# 获取请求参数
lesson_apply_info = request.data
lid = lesson_apply_info.getlist("lid")[0]
lname = lesson_apply_info.getlist("lname")[0]
ltime = lesson_apply_info.getlist("ltime")[0]
# lstart = lesson_apply_info.getlist("lstart")[0]
lclass_id = lesson_apply_info.getlist("lclass_id")[0]
lclassroom_id = lesson_apply_info.getlist("lclassroom_id")[0]
lteacher_id = lesson_apply_info.getlist("lteacher_id")[0]
# 将获取的数据上在数据库创建
lesson_table.objects.create(
lid=lid,
lname=lname,
ltime=ltime,
lclass_id=lclass_id,
lclassroom_id=lclassroom_id,
lteacher_id=lteacher_id,
)
return Response({
"message": "ok"
})
class AllLessonView(APIView):
"""课表查询"""
def post(self, request):
"""
路由 POST /lesson/lessonall/
请求参数: lclass_id = ?
"""
# 获取请求数据
querydict = request.data
# 获取请求数据中的 教师id
lclass_id = querydict.getlist("lclass_id")[0]
# 在课程表中查询该教师的课数据,并序列化
class_lesson = lesson_table.objects.filter(lclass_id=lclass_id)
lesson_serializer = LessonSerializer(class_lesson, many=True)
# 返回教师课表数据
return Response(lesson_serializer.data)
```
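A hypothetical client call against `LessonSelectView.post`; the host and ids are placeholders, and the path is taken from the view's docstring. The view reads form-encoded fields via `request.data.getlist(...)`, so plain form data is enough.
```python
import requests

# userkind "2" means student, so the view resolves the class id first and then its lessons
resp = requests.post(
    'http://127.0.0.1:8000/lesson/lesson/',
    data={'userkind': '2', 'user_id': '80901001'},
)
print(resp.json())  # list of lessons scheduled for that student's class
```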
#### File: apps/users/models.py
```python
from django.db import models
from classes.models import class_table
from lesson.models import lesson_table
class teacher_table(models.Model):
"""教师表"""
tid = models.IntegerField(primary_key=True, verbose_name="教师工号")
tname = models.CharField(max_length=20, verbose_name="教师名")
class Meta:
db_table = "t_teacher"
verbose_name = "教师表"
def __str__(self):
"""定义每个数据对象的显示信息"""
return self.tname
class student_table(models.Model):
"""学生表"""
sid = models.BigIntegerField(primary_key=True, verbose_name="学号")
sname = models.CharField(max_length=20, verbose_name="学生名")
sclass = models.ForeignKey(class_table, on_delete=models.CASCADE, verbose_name="所在班级")
lesson = models.ManyToManyField(lesson_table)
class Meta:
db_table = "t_student"
verbose_name = "学生表"
def __str__(self):
"""定义每个数据对象的显示信息"""
return self.sname
class super_table(models.Model):
"""超级管理员表"""
username = models.CharField(max_length=20, verbose_name="账号名")
password = models.CharField(max_length=20, verbose_name="密码")
class Meta:
db_table = "t_super"
verbose_name = "超级管理员表"
def __str__(self):
"""定义每个数据对象的显示信息"""
return self.username
```
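In ORM terms the tables hang together through `sclass` (foreign key to the class table) and `lesson` (many-to-many). A short sketch of wiring a student to a lesson; the ids are placeholders and assume matching rows already exist.
```python
from lesson.models import lesson_table
from users.models import student_table

stu = student_table.objects.get(sid=1530501001)           # placeholder student id
stu.lesson.add(lesson_table.objects.get(lid=809010001))   # enrol through the many-to-many field
print(stu.sclass_id, stu.lesson.count())
```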
#### File: apps/users/views.py
```python
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework.views import APIView
from users.models import super_table, teacher_table, student_table
from users.serializers import SuperSerializer, TeacherSerializer, StudentSerializer
class UserManagementView(APIView):
"""用户管理"""
def get(self, request):
"""返回"""
# 去 session 中取指定的值
user_id = request.session.get("user_id", None)
is_admin = request.session.get("is_admin", False)
# 如果用户id存在,并且是管理员,那么直接跳转管理后台主页
if user_id and is_admin:
return render(request, "html/super_index.html")
return render(request, "html/admin_login.html")
def post(self, request):
# 获取请求参数
user_info = request.data
username = user_info.getlist("username")[0]
password = user_info.getlist("password")[0]
userkind = user_info.getlist("userkind")[0]
# 判断是否存在
if userkind == "3":
# 超级管理员
try:
user = super_table.objects.filter(username=username)
except:
return Response({"error": "登陆错误"})
user_serializer = SuperSerializer(user, many=True)
# 获取序列化后的用户密码
user_data = user_serializer.data
# 从数据中获取数据
user_info = user_data[0]
user_password = user_info["password"]
if password != user_password:
return Response({"error": "登陆错误"})
else:
request.session['user_id'] = user_info["username"]
request.session['is_admin'] = True
return render(request, "html/super_index.html")
if userkind == "1":
# 教师
try:
user = teacher_table.objects.filter(tid=username)
except:
return Response({"error": "登陆错误"})
if not user:
return Response({"error": "登陆错误"})
if password != username:
return Response({"error": "登陆错误"})
else:
user_serializer = TeacherSerializer(user, many=True)
# 获取序列化后的数据
user_data = user_serializer.data
# 获取具体数据
user_info = user_data[0]
request.session['user_id'] = user_info["tid"]
request.session['is_admin'] = False
return render(request, "html/teacher_index.html")
if userkind == "2":
# 学生
try:
user = student_table.objects.filter(sid=username)
except:
return Response({"error": "登陆错误"})
if not user:
return Response({"error": "登陆错误"})
if password != username:
return Response({"error": "登陆错误"})
else:
user_serializer = StudentSerializer(user, many=True)
# 获取序列化后的数据
user_data = user_serializer.data
# 获取具体数据
user_info = user_data[0]
request.session['user_id'] = user_info["sid"]
request.session['is_admin'] = False
return render(request, "html/student_index.html")
class SupermanagementView(APIView):
"""超级管理员用户管理"""
def get(self, request, userkind):
"""查询所有"""
# 获取查询用户种类
# 教师
if userkind == "1":
try:
user = teacher_table.objects.all()
except:
return Response({"error": "查询失败"})
# 序列化
user_serializer = TeacherSerializer(user, many=True)
classroom_dict = user_serializer.data
# 返回
return Response(classroom_dict)
# 学生
elif userkind == "2":
try:
user = student_table.objects.all()
except:
return Response({"error": "查询失败"})
# 序列化
user_serializer = StudentSerializer(user, many=True)
user_dict = user_serializer.data
# 返回
return Response(user_dict)
def post(self, request):
"""创建"""
# 获取请求参数
user_info = request.data
# 获取创建的用户种类
userkind = user_info.getlist("userkind")[0]
# 教师
if userkind == "1":
# 获取其余信息
tid = user_info.getlist("tid")[0]
tname = user_info.getlist("tname")[0]
# 将获取的数据上在数据库创建
teacher_table.objects.create(
tid=tid,
tname=tname,
)
return Response({
"tid": tid,
"tname": tname,
})
elif userkind == "2":
# 获取其余信息
sid = user_info.getlist("sid")[0]
sname = user_info.getlist("sname")[0]
sclass_id = user_info.getlist("sclass_id")[0]
# 将获取的数据上在数据库创建
student_table.objects.create(
sid=sid,
sname=sname,
sclass_id=sclass_id,
)
return Response({
"sid": sid,
"sname": sname,
"sclass_id": sclass_id,
})
def put(self, request, userkind, id):
"""修改"""
# 教师
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = TeacherSerializer(user, data=user_dict)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = TeacherSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
# 学生
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = StudentSerializer(user, data=user_dict)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = StudentSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
def patch(self, request, userkind, id):
"""局部更新"""
# 教师
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = TeacherSerializer(user, data=user_dict, partial=True)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = TeacherSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
# 学生
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 接收
user_dict = request.data
# 验证
user_serilizer = StudentSerializer(user, data=user_dict, partial=True)
if not user_serilizer.is_valid():
return Response(user_serilizer.errors)
# 保存 update
user = user_serilizer.save()
# 响应
user_serilizer = StudentSerializer(user)
user_dict = user_serilizer.data
return Response(user_dict, status=201)
def delete(self, request, userkind, id):
"""删除"""
if userkind == "1":
try:
user = teacher_table.objects.get(tid=id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
if userkind == "2":
try:
user = student_table.objects.get(sid=id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
class UserLogOutView(APIView):
"""用户管理退出"""
def get(self, request):
request.session.pop('user_id', None)
request.session.pop('is_admin', None)
# 返回结果
return HttpResponseRedirect("http://www.etms.mp:8000/user/user/")
class StudentView(APIView):
"""
学生视图
"""
def post(self, request):
"""
创建学生信息
路由: POST /user/student/
"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
user_name = student_info.getlist("user_name")[0]
class_id = student_info.getlist("class_id")[0]
student_table.objects.create(
sid=user_id,
sname=user_name,
sclass_id=class_id
)
return Response({"message": "ok"})
def delete(self, request):
"""删除学生信息路由: DELETE /user/student/"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
try:
user = student_table.objects.get(sid=user_id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
class StudentFindView(APIView):
"""学生信息查询"""
def post(self, request):
classes_info = request.data
classes_id = classes_info.getlist("classes_id")[0]
print(classes_id)
user = student_table.objects.filter(sclass_id=classes_id)
print(user)
user_serializer = StudentSerializer(user, many=True)
# 获取序列化后的用户密码
user_data = user_serializer.data
return Response(user_data)
class TeacherView(APIView):
"""
教师视图
"""
def get(self, request):
teacher = teacher_table.objects.filter()
teacher_info = TeacherSerializer(teacher, many=True)
return Response(teacher_info.data)
def post(self, request):
"""
创建教师信息
路由: POST /user/teacher/
"""
student_info = request.data
user_id = student_info.getlist("user_id")[0]
user_name = student_info.getlist("user_name")[0]
teacher_table.objects.create(
tid=user_id,
tname=user_name,
)
return Response({"message": "ok"})
def delete(self, request):
"""
删除学生信息
路由: DELETE /user/teacher/
"""
teacher_info = request.data
user_id = teacher_info.getlist("user_id")[0]
try:
user = teacher_table.objects.get(tid=user_id)
except:
return Response({"error": "查询错误"})
# 删除
user.delete()
# 响应
return Response(status=204)
``` |
{
"source": "17605272633/ManyBeautifulMall",
"score": 2
} |
#### File: apps/carts/serializers.py
```python
from rest_framework import serializers
from goods.models import SKU
# 购物车数据添加序列化器
class AddCartSerializer(serializers.Serializer):
"""购物车数据添加序列化器"""
# 定义属性
sku_id = serializers.IntegerField(label='sku_id', min_value=1)
count = serializers.IntegerField(label='数量', min_value=1)
selected = serializers.BooleanField(label='是否勾选', default=True)
# 验证
def validate(self, attrs):
try:
sku = SKU.objects.get(id=attrs['sku_id'])
except:
raise serializers.ValidationError('查询商品SKU错误')
if sku is None:
raise serializers.ValidationError('未查询到数据')
else:
return attrs
# 购物车数据查询序列化器
class FindCartSerializer(serializers.ModelSerializer):
"""购物车数据查询序列化器"""
# 定义属性
count = serializers.IntegerField(label='数量')
selected = serializers.BooleanField(label='是否勾选')
class Meta:
model = SKU
fields = ('id', 'name', 'default_image_url', 'price', 'count', 'selected')
# 购物车数据修改序列化器
class UpDateCartSerializer(serializers.Serializer):
"""购物车数据修改序列化器"""
# 定义属性
sku_id = serializers.IntegerField(label='sku_id', min_value=1)
count = serializers.IntegerField(label='数量', min_value=1)
selected = serializers.BooleanField(label='是否勾选', default=True)
# 验证
def validate(self, attrs):
try:
sku = SKU.objects.get(id=attrs['sku_id'])
except:
raise serializers.ValidationError('查询商品SKU错误')
if sku is None:
raise serializers.ValidationError('未查询到数据')
else:
return attrs
# 购物车数据删除序列化器
class DeleteCartSerializer(serializers.Serializer):
"""购物车数据删除序列化器"""
# 定义属性
sku_id = serializers.IntegerField(label='商品id', min_value=1)
# 验证
def validate_sku_id(self, value):
# 获取商品信息,验证商品是否存在
try:
sku = SKU.objects.get(id=value)
except SKU.DoesNotExist:
raise serializers.ValidationError('商品不存在')
return value
# 购物车数据全选序列化器
class SelectAllCartSerializer(serializers.Serializer):
"""购物车数据全选序列化器"""
selected = serializers.BooleanField(label='全选')
```
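The add/update serializers check that the SKU exists before the cart view touches redis. A minimal sketch of exercising `AddCartSerializer` outside a view; the sku id is a placeholder.
```python
from carts.serializers import AddCartSerializer

serializer = AddCartSerializer(data={'sku_id': 1, 'count': 2})
if serializer.is_valid():
    # selected falls back to its default of True when the client omits it
    print(serializer.validated_data)
else:
    print(serializer.errors)  # e.g. the "查询商品SKU错误" message when the SKU lookup fails
```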
#### File: apps/oauth/utils.py
```python
from urllib.parse import urlencode, parse_qs
from urllib.request import urlopen
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer, BadData
from django.conf import settings
import json
import logging
from oauth import constants
from oauth.exceptions import QQAPIError
logger = logging.getLogger('django')
class OAuthQQ(object):
"""QQ认证辅助工具类"""
def __init__(self, client_id=None, client_secret=None, redirect_uri=None, state=None):
self.client_id = client_id or settings.QQ_CLIENT_ID
self.client_secret = client_secret or settings.QQ_CLIENT_SECRET
self.redirect_uri = redirect_uri or settings.QQ_REDIRECT_URI
self.state = state or settings.QQ_STATE
def get_qq_login_url(self):
"""
获取qq登陆的网址(获取Authorization Code)
:return: url网址
"""
params = {
"response_type": 'code', # 授权类型,此值固定为“code”
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'state': self.state,
# 'scope': 'get_user_info', # 请求用户授权时向用户显示的可进行授权的列表
}
url = 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(params)
return url
def get_code(self, r_p):
"""
获取code
:return: code
"""
code = r_p.get('code')
return code
def get_access_token(self, code):
"""
获取access_token
:param code: qq提供的code
:return: access_token授权令牌
"""
params = {
'grant_type': 'authorization_code', # 授权类型
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri
}
# urlencode 将字典转成查询字符串
url = 'https://graph.qq.com/oauth2.0/token?' + urlencode(params)
# urlopen 发送http请求,默认发送get请求
response = urlopen(url)
# 返回response响应对象,可以通过read()读取响应体数据,为bytes类型
response_data = response.read().decode()
# parse_qs 将查询字符串格式数据转换为python的字典
data = parse_qs(response_data)
# 获取access_token
access_token = data.get('access_token', None)
if not access_token:
logger.error('code=%s msg=%s' % (data.get('code'), data.get('msg')))
raise QQAPIError
# 因为获取的列表中是有三段
# 分别为access_token(授权令牌), expires_in(该access token的有效期), refresh_token(获取新的Access_Token时需要提供的参数)
return access_token[0]
def get_open_id(self, access_token):
"""
获取用户的openid
:param access_token: qq提供的access_token授权令牌
:return: open_id(唯一对应用户身份的标识)
"""
url = 'https://graph.qq.com/oauth2.0/me?access_token=' + access_token
response = urlopen(url)
response_data = response.read().decode()
try:
# 检查返回数据的正误
# 返回包: callback({"client_id": "YOUR_APPID", "openid": "YOUR_OPENID"});
print(json.loads(response_data[10:-4]))
data = json.loads(response_data[10:-4])
print(data)
except Exception:
data = parse_qs(response_data)
logger.error('code=%s msg=%s' % (data.get('code'), data.get('msg')))
raise QQAPIError
openid = data.get('openid', None)
return openid
@staticmethod
def generate_save_user_token(openid):
"""
生成保存用户数据的token
:param openid: 用户的openid
:return: token
"""
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
data = {'openid': openid}
# serializer.dumps(数据), 返回bytes类型
token = serializer.dumps(data)
return token.decode()
@staticmethod
def check_seve_user_token(token):
"""
检查保存用户数据的token
:param token: 保存用户数据的token
:return: openid or None
"""
# 检查token, 会抛出itsdangerous.BadData异常
# Serializer(秘钥, 有效期秒)
serializer = Serializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
try:
data = serializer.loads(token)
except BadData:
return None
else:
return data.get('openid')
```
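`generate_save_user_token` and `check_seve_user_token` are both static, so the openid round trip can be exercised directly. A sketch with a made-up openid; the method name keeps the repo's spelling.
```python
from oauth.utils import OAuthQQ

token = OAuthQQ.generate_save_user_token('ABC123OPENID')  # openid value is made up
print(OAuthQQ.check_seve_user_token(token))               # 'ABC123OPENID' while the token is valid
print(OAuthQQ.check_seve_user_token('tampered-token'))    # None, because loads() raises BadData
```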
#### File: apps/oauth/views.py
```python
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from carts.utils import merge_cart_cookie_to_redis
from oauth.exceptions import QQAPIError
from oauth.models import OAuthQQUser
from oauth.serializers import OAuthQQUserSerializer
from oauth.utils import *
from utils import tjws, jwt_token
class QQAuthURLView(APIView):
"""获取QQ用户登录的url"""
def get(self, request):
"""
Clicking "QQ login" hits the route /qq/authorization/?next=/, which is bound to this view;
the view returns the parameterised QQ authorization address as JSON and the client then
redirects to that address.
Request method: GET
Provides the URL used for QQ login.
:param request: carries "next", the Meiduo Mall page to return to after a successful QQ login
:return: {
"login_url": 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(params)
}
"""
next = request.query_params.get('next')
oauth = OAuthQQ(state=next)
login_url = oauth.get_qq_login_url()
return Response({'login_url': login_url})
class QQAuthUserView(APIView):
"""QQ登陆的用户"""
def get(self, request):
"""
获取qq登陆的用户数据
:param request: 包含数据的请求
:return: response响应
"""
# 获取QQ返回的授权凭证
# code = oauth.get_code(request.query_params)
code = request.query_params.get('code')
if not code:
return Response({'message': '缺少code'}, status=status.HTTP_400_BAD_REQUEST)
oauth = OAuthQQ()
# 获取用户的access_token, openid
try:
access_token = oauth.get_access_token(code)
openid = oauth.get_open_id(access_token)
except QQAPIError:
return Response({'message': 'QQ服务异常'}, status=status.HTTP_503_SERVICE_UNAVAILABLE)
# 通过openid判断用户是否存在
try:
qquser = OAuthQQUser.objects.get(openid=openid)
except OAuthQQUser.DoesNotExist: # 不报错退出
# 如果不存在,则通知客户端转到绑定页面
# 用户第一次使用QQ登录
# 将openid加密存入token中
token = tjws.dumps({'openid': openid}, constants.SAVE_QQ_USER_TOKEN_EXPIRES)
return Response({
'access_token': token # 由序列化器接收,用于解密后获取openid
})
else:
# 找到用户,生成token
user = qquser.user
token = jwt_token.generate(user)
response = Response({
'token': token,
'user_id': qquser.id,
'username': qquser.user.username
})
# 合并购物车
response = merge_cart_cookie_to_redis(request, user, response)
return response
def post(self, request):
"""
绑用户接口
:param request: mobile手机号,password密码,sms_code短信验证码,access_token凭据
都在request.data中,传入了序列化器
:return: {
'token': user.token,
'user_id': user.id,
'username': user.username
}
"""
# 接收数据
serializer = OAuthQQUserSerializer(data=request.data)
# 验证
if not serializer.is_valid():
return Response({'message': serializer.errors})
# 保存
qquser = serializer.save()
# 生成token
user = qquser.user
token = jwt_token.generate(user)
# 响应
response = Response({
'token': token,
'user_id': qquser.id,
'username': qquser.user.username
})
# 合并购物车
response = merge_cart_cookie_to_redis(request, user, response)
return response
```
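Seen from the client, the QQ flow is two requests: fetch the authorization URL, then hand the returned `code` back to `QQAuthUserView`. A rough sketch; the host is a placeholder and the path is taken from the docstring above.
```python
import requests

base = 'http://127.0.0.1:8000'  # placeholder host
login_url = requests.get(base + '/qq/authorization/?next=/').json()['login_url']
# The browser opens login_url; after the user authorizes, QQ redirects back with ?code=...,
# which the front end forwards to QQAuthUserView.get to receive either a JWT token
# (existing binding) or an access_token that is later used to bind the account via POST.
```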
#### File: apps/orders/views.py
```python
from django_redis import get_redis_connection
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from decimal import Decimal
from goods.models import SKU
from orders.serializers import OrderSettlementSerializer, SaveOrderSerializer
# 订单结算类视图
class OrderSettlementView(APIView):
"""订单结算类视图"""
permission_classes = [IsAuthenticated]
def get(self, request):
"""
订单结算
GET /orders/settlement/
:param request: request.user当前用户
:return: {
"freight":"运费",
"skus":[ 结算的商品列表
{
"id": 商品id,
"name": 商品名称,
"default_image_url": 商品默认图片路径,
"price": 商品单价,
"count": 商品数量
},
......
]
}
"""
# 获取登陆的用户数据
user = request.user
# 获取购物车中被勾选的要结算的商品信息
redis_conn = get_redis_connection('cart')
# redis_cart_id = redis_conn.hkeys('cart_%s' % user.id) # 拿到当前用户所有购物车商品息
redis_cart_selected = redis_conn.smembers('cart_selected_%s' % user.id)
redis_cart_selected = [int(sku_id) for sku_id in redis_cart_selected] # 拿到被勾选的商品sku_id
# 拿到商品对应数量
cart_count = {}
"""
{
sku_id1 : count1,
sku_id2 : count2,
...
}
"""
for sku_id in redis_cart_selected:
cart_count[sku_id] = int(redis_conn.hget('cart_%s' % user.id, sku_id))
# 查询商品,添加数量属性
skus = SKU.objects.filter(pk__in=redis_cart_selected)
for sku in skus:
sku.count = cart_count[sku.id]
# 运费
freight = Decimal('10.00')
# 定义序列化数据格式
context = {
'freight': freight,
'skus': skus
}
serializer = OrderSettlementSerializer(context)
return Response(serializer.data)
# 订单保存类视图
class SaveOrderView(CreateAPIView):
"""订单保存类视图"""
permission_classes = [IsAuthenticated]
serializer_class = SaveOrderSerializer
```
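`OrderSettlementView` reads two redis structures per user: a hash `cart_<user_id>` mapping sku_id to count, and a set `cart_selected_<user_id>` with the ticked sku_ids. A sketch of seeding them by hand; the ids are placeholders.
```python
from django_redis import get_redis_connection

redis_conn = get_redis_connection('cart')
redis_conn.hset('cart_1', 10, 2)        # user 1: sku 10 -> quantity 2
redis_conn.sadd('cart_selected_1', 10)  # sku 10 is ticked, so it will appear in the settlement
```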
#### File: apps/users/views.py
```python
from django_redis import get_redis_connection
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.generics import CreateAPIView, RetrieveAPIView, UpdateAPIView, GenericAPIView, ListCreateAPIView
from rest_framework.mixins import CreateModelMixin, UpdateModelMixin
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet, ModelViewSet
from rest_framework_jwt.views import ObtainJSONWebToken
from carts.utils import merge_cart_cookie_to_redis
from goods.models import SKU
from goods.serializers import SKUSerializer
from users.models import User
from users.serializers import CreateUserSerializer, UserDetailSerializer, EmailSerializer, UserAddressSerializer, \
AddressTitleSerializer, constants, EmailActiveSerializer, RecordUserBrowsingHistorySerializer
# 统计该用户名数量的类视图
class UsernameCountView(APIView):
"""统计该用户名数量的类视图"""
def get(self, request, username):
"""
统计该用户名数量
路由: GET usernames/(?P<username>\w{5,20})/count/
:param username: 用户名
:return: {'username':'xxxxx', 'count': 'x'} username用户名,count数量
"""
# 获取该用户名用户数量
count = User.objects.filter(username=username).count()
# 组织响应数据
data = {
'username': username,
'count': count
}
# 响应json数据
return Response(data)
# 统计该用户手机号数量的类视图
class MobileCountView(APIView):
"""统计该用户手机号数量的类视图"""
def get(self, request, mobile):
"""
统计该用户手机号数量
路由: GET mobiles/(?P<mobile>1[3-9]\d{9})/count/
:param mobile: 用户手机号
:return: {'mobile': 'xxxxxxxxxxx', 'count': 'x'} mobile手机号,count数量
"""
count = User.objects.filter(mobile=mobile).count()
data = {
'mobile': mobile,
'count': count
}
return Response(data)
# 用户注册类视图
class UserCreateView(CreateAPIView):
"""
用户注册类视图
CreateAPIView中有封装相对应的请求方式,创建新用户等代码,此视图函数中只需要定义serializer对象
路由: POST /users/
不需要查询集,只做创建不做查询
serializers.py中定义了CreateUserSerializer.此处只需要调用序列化器
"""
serializer_class = CreateUserSerializer
# 用户详情页类视图
class UserDetailView(RetrieveAPIView):
"""用户详情页类视图"""
# queryset = User.objects.all()
serializer_class = UserDetailSerializer
# 用户认证
permission_classes = [IsAuthenticated]
# 视图中封装好的代码,是根据主键查询得到对象
# 根据登陆的用户,显示个人信息
# get_object默认根据pk查询,重写get_object
def get_object(self): # self is the view instance; request.user is the logged-in user
return self.request.user
# 保存用户邮箱
class EmailView(UpdateAPIView):
"""保存用户邮箱"""
serializer_class = EmailSerializer
# 必须是登陆过后的用户
permission_classes = [IsAuthenticated]
def get_object(self): # 返回当前登陆的用户数据
return self.request.user
# 邮箱验证
class VerifyEmailView(APIView):
"""邮箱认证"""
def get(self, request):
"""
请求方式: GET /emails/verification/?token=
:param request: 可用于获取查询字符串的请求
:return: {'message': 'OK'}
"""
# # 获取token
# token = request.query_params.get('token')
# if not token:
# return Response({'message': '缺少token'}, status=status.HTTP_400_BAD_REQUEST)
#
# # 验证token
# user = User.check_verify_email_token(token)
# if user is None:
# return Response({'message': '链接信息无效'}, status=status.HTTP_400_BAD_REQUEST)
# else:
# user.email_active = True
# user.save()
# 接收数据并验证
serializer = EmailActiveSerializer(data=request.query_params)
if not serializer.is_valid():
return Response(serializer.errors)
# 查询当前用户,并修改属性
user = User.objects.get(pk=serializer.validated_data.get('user_id'))
user.email_active = True
user.save()
# 响应
return Response({'message': 'OK'})
# 用户地址 Z S G C
class AddressViewSet(ModelViewSet):
"""
用户地址的新增于修改
POST /addresses/ 新建 -> create
PUT /addresses/<pk>/ 修改 -> update
GET /addresses/ 查询 -> list
DELETE /addresses/<pk>/ 删除 -> destroy
PUT /addresses/<pk>/status/ 设置默认 -> status
PUT /addresses/<pk>/title/ 设置标题 -> title
"""
serializer_class = UserAddressSerializer
permission_classes = [IsAuthenticated]
# 重写获取查询集
def get_queryset(self):
# 获取当前用户所有没被删除的地址
return self.request.user.addresses.filter(is_deleted=False)
def list(self, request, *args, **kwargs): # 等同于 GET 的查询所有
"""
用户地址列表数据
访问路径: GET /addresses/
:return:
"""
addr_queryset = self.get_queryset()
addr_serializer = self.get_serializer(addr_queryset, many=True) # 因为是一对多,所以要many = True
user = self.request.user
# 格式固定,因为list本身返回结果不满足js要求
return Response({
'user_id': user.id,
'default_address_id': user.default_address_id,
'limit': constants.USER_ADDRESS_COUNTS_LIMIT,
'addresses': addr_serializer.data,
})
def create(self, request, *args, **kwargs): # 等同于 POST 的创建
"""
保存用户地址数据
访问路径: POST /addresses/
:return:
"""
# 因为设置了地址上限,所以判断
count = request.user.addresses.count()
if count >= constants.USER_ADDRESS_COUNTS_LIMIT:
return Response({'message': '保存地址数据已达到上限'}, status=status.HTTP_400_BAD_REQUEST)
return super().create(request, *args, **kwargs)
# 修改的update方法已在ModelViewSet中完成
def destroy(self, request, *args, **kwargs): # 等同于 DELETE 的删除
"""
删除用户地址数据(逻辑删除)
:return:
"""
# 根据主键查询对象
user_address = self.get_object()
# 逻辑删除
user_address.is_deleted = True
user_address.save()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(methods=['put'], detail=True)
def status(self, request, pk=None):
"""
设置默认地址
访问路径: PUT /addresses/pk/status/
:param pk: 主键
:return: {'message': 'OK'}
"""
# 获取当前地址数据
address = self.get_object()
# 将默认地址设置为当前地址
request.user.default_address = address
request.user.save()
return Response({'message': 'OK'}, status=status.HTTP_200_OK)
@action(methods=['put'], detail=True)
def title(self, request, pk=None):
"""
修改标题
访问路径: PUT /addresses/pk/title/
:param pk: 主键
:return:
"""
# 获取当前地址数据
address = self.get_object()
# addr_serializer = AddressTitleSerializer(instance=address, data=request.data)
# if addr_serializer.is_valid() is False:
# return Response({'message': addr_serializer.errors})
# addr_serializer.save()
# return Response(addr_serializer.data)
address.title = request.data.get('title')
address.save()
return Response({'title': address.title})
# 用户浏览记录
class UserBrowsingHistoryView(ListCreateAPIView):
"""
记录用户浏览记录
请求方式: POST /browse_histories/
"""
permission_classes = [IsAuthenticated]
def get_serializer_class(self):
# 创建与查询列表使用不同的序列化器
if self.request.method == 'GET':
return SKUSerializer
else:
return RecordUserBrowsingHistorySerializer
def get_queryset(self):
"""
获取浏览历史记录
:return: id, name, price, default_image_url, comments
"""
# 获取当前登陆的用户id
user_id = self.request.user.id
# 创建redis对象,在redis中查询历史记录数据
redis_conn = get_redis_connection("history")
# history是sku_id的集合
history = redis_conn.lrange("history_%s" % user_id, 0, -1)
skus = []
# Keep the result order consistent with the user's browsing history:
# entries were pushed onto the redis list in browsing order, so iterate them as stored.
for sku_id in history:
skus.append(SKU.objects.get(pk=sku_id))
# print(skus)
return skus # [sku,sku,sku,...]
# 用户登陆(合并购物车)
class UserAuthorizeView(ObtainJSONWebToken):
"""
用户认证
"""
def post(self, request, *args, **kwargs):
# 调用父类的方法,获取drf jwt扩展默认的认证用户处理结果
response = super().post(request, *args, **kwargs)
# 仿照drf jwt扩展对于用户登录的认证方式,判断用户是否认证登录成功
serializer = self.get_serializer(data=request.data)
# 获取用户id
user_id = response.data.get('user_id')
# 如果用户登录认证成功,则合并购物车
if serializer.is_valid():
# user = serializer.validated_data.get('user')
response = merge_cart_cookie_to_redis(request, user_id, response)
return response
``` |
{
"source": "17621192638/flaskfd",
"score": 2
} |
#### File: flaskfd/controller/user_controller.py
```python
from flask_restful import Resource, request
from service.user_service import user_service
import utils.response_util as response_util
service = user_service()
class do_login(Resource):
@response_util.response_filter_v2
def post(self):
text_json = request.get_json()
return service.do_login(text_json)
class update_user(Resource):
"""更新用户信息"""
@response_util.response_filter_v2
def post(self):
text_json = request.get_json()
return service.update_user(text_json)
class select_users(Resource):
"""查询用户信息"""
@response_util.response_filter_v2
def post(self):
text_json = request.get_json()
return service.select_users(text_json)
class test_group(Resource):
@response_util.response_filter_v2
def post(self):
text_json = request.get_json()
return service.test_group(text_json)
```
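The routing module that mounts these resources is not in this dump; with flask_restful it would presumably look like the sketch below, where the URL paths are assumptions.
```python
from flask import Flask
from flask_restful import Api

from controller.user_controller import do_login, select_users, update_user

app = Flask(__name__)
api = Api(app)
api.add_resource(do_login, '/user/login')      # paths are guesses; the real ones
api.add_resource(update_user, '/user/update')  # live in the missing app entry point
api.add_resource(select_users, '/user/list')
```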
#### File: mysql/localhost/pool.py
```python
import utils.mysql.mysql_common_util as mysql_util
import configparser,os,time
cf = configparser.ConfigParser()
cf.read(os.path.dirname(__file__)+"/../conf.ini")
# conf对应的数据库key
key = "localhost"
# 创建当前数据库的连接池对象
class service(object):
def __init__(self):
environment = cf.get(key,"environment")
for i in range(3):
print("当前mysql运行环境: {} !!!!".format(environment))
time.sleep(0.1)
self.pool = mysql_util.get_mysql_pool(
host=cf.get(key,"host"),
port=cf.get(key,"port"),
user=cf.get(key,"user"),
password=<PASSWORD>(key,"<PASSWORD>"),
database=cf.get(key,"db"),
charset="utf8mb4"
)
self.conn, self.cursor = mysql_util.get_db_from_pool(pool=self.pool)
s = service()
run_sql = mysql_util.get_wrapper(s.pool)
run_sql_v2 = mysql_util.get_wrapper_v2(s.pool)
from utils.mysql.common_dao import common_dao as common
# Wrap the shared dao methods with the pool-bound decorators; decorators cannot be passed in through __init__
class common_dao(common):
def __init__(self,table_name):
super().__init__(table_name=table_name)
escape_none_keys = ["status","email","phone","text","sentiment","img_url","dms_name","twords"]
def move_none_keys(self,**kwargs):
"""移除不接受None的key"""
model = kwargs.get("model",None)
if model:
escape_keys = [k for k, v in model.items() if (v is None or v == "") and k in self.escape_none_keys]
for k in escape_keys: del model[k]
@mysql_util.pymysql_time_deal
@run_sql(fetch_type="all")
def select(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select(*args, **kwargs)
@mysql_util.pymysql_time_deal
@run_sql(fetch_type="one")
def select_one(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select_one(*args, **kwargs)
@run_sql(fetch_type=None)
def update_by_id(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().update_by_id(*args, **kwargs)
@run_sql_v2(fetch_type=None)
def update_by_id_v2(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().update_by_id_v2(*args, **kwargs)
@run_sql(fetch_type=None)
def insert(self,*args, **kwargs):
return super().insert(*args, **kwargs)
@run_sql(fetch_type="one")
def select_total(self,*args, **kwargs):
self.move_none_keys(**kwargs)
return super().select_total(*args, **kwargs)
@run_sql(fetch_type=None)
def delete_by_id(self,*args, **kwargs):
return super().delete_by_id(*args, **kwargs)
@run_sql(fetch_type=None)
def delete(self,*args, **kwargs):
return super().delete(*args, **kwargs)
@run_sql_v2(fetch_type="all")
def insert_v2(self,*args, **kwargs):
return super().insert_v2(*args, **kwargs)
if __name__ == '__main__':
pass
```
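The `user_dao` module imported by the service layer below is not included here. Given the decorated `common_dao` above, it would presumably be a thin module-level wrapper along these lines; the table name is an assumption and the `test_group*` helpers are omitted.
```python
# dao/mysql/localhost/user_dao.py -- hypothetical reconstruction
from dao.mysql.localhost.pool import common_dao

_dao = common_dao(table_name="t_user")

select = _dao.select
select_one = _dao.select_one
select_total = _dao.select_total
update_by_id = _dao.update_by_id
```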
#### File: flaskfd/service/user_service.py
```python
import dao.mysql.localhost.user_dao as user_dao
import utils.common_util as common_util
import utils.decorator_util as decorator_util
class user_service:
@decorator_util.decorator_none
def do_login(self,text_json):
users = user_dao.select(model={"username":text_json["username"],"password":text_json["password"],"status":0})
if not users:return {"code":10001}
else: return users[0]
def update_user(self,text_json):
res = user_dao.update_by_id(model=text_json)
return res
def select_users(self,text_json):
data = user_dao.select(model=text_json,fields=["id","username","create_time"])
total = user_dao.select_total(model=text_json)
return {"total":total["total"],"data":data,"page_num":text_json["page_num"],"page_size":text_json["page_size"]}
def test_group(self,text_json):
data = user_dao.test_group1()
# 或 data = user_dao.test_group2()
return data
if __name__ == '__main__':
pass
``` |
{
"source": "17621368758/tranpathPY",
"score": 3
} |
#### File: tranpathPY/api_view/models.py
```python
import pymysql
def _mysql(sql):
# 打开并配置数据库
#conn = pymysql.connect(
# host='localhost',
# db='sql_order',
# port=3306,
# user='root',
# passwd='<PASSWORD>',
# charset='utf8'
#)
conn = pymysql.connect(
host='172.16.17.32',
db='AiTaoDB',
port=3306,
user='root',
passwd='<PASSWORD>',
charset='utf8'
)
cursor = conn.cursor()
try:
cursor.execute(sql)
# 提交数据库
conn.commit()
print('数据保存成功')
except Exception as e:
print(e)
print('数据保存失败')
else:
return cursor.fetchall()
finally:
conn.close()
```
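A usage sketch for `_mysql`; the table and column names are placeholders. The helper returns `cursor.fetchall()` on success and `None` when the statement raised, so callers should guard the iteration, and since it executes whatever string it is given, building that string safely is the caller's job.
```python
rows = _mysql("SELECT id, name FROM demo_table LIMIT 10")
for row in rows or []:
    print(row)
```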
#### File: system/views/viewsCode.py
```python
from django.shortcuts import render
from django.shortcuts import HttpResponse
import os
import json
import xlrd
import xlwt
import xlutils
import datetime
import random
import time
from system import models
def code(request):
purviewCode = request.POST.get("purviewCode","-1")
if purviewCode == "-1":
hasPurview = "#"
else:
hasPurview = ""
folder = os.path.abspath(os.path.join(os.getcwd(), ""))+"/static/dbDesign"
#print(folder)
sheetName = ""
L = []
for root, dirs, files in os.walk(folder):
for file in files:
if os.path.splitext(file)[1] == '.xls' or os.path.splitext(file)[1] == '.xlsx':
#L.append(os.path.join(file))
fileFullPath = os.path.join(root,file)
workbook = xlrd.open_workbook(fileFullPath)#打开execel
sheetNames = workbook.sheet_names()
#print(sheetNames)
for sheet in sheetNames:
L.append({
"id": sheet + "|"+file,
"text": sheet + "["+file+"]",
})
hasSorts = "F"#是否有sorts字段
hasSellerId = ""#是否有sellerId字段(判断唯一值时使用)
updateFieldsStr = ""#用户更新字段列表
keywordsSearchStr = ""#keywords查询自动拼装(字符串型数据)
fieldList = []
jsValidate_rules = "rules:{ //配置验证规则,key就是被验证的dom对象,value就是调用验证的方法(也是json格式)\n" # 表单验证规则
jsValidate_messages = "messages:{\n"#表单验证提示
js_init = ""#初始化dom对象,如select的option
tableNameFull = request.POST.get("tableNameFull","")
tableNameChinese = ""#表的中文名(如:菜单,用户,权限)
if tableNameFull != "":
sheetName,tableNameChinese,file = tableNameFull.split("|")
#print(file)
#print(sheetName)
fileFullPath = os.path.join(folder, file)
print(fileFullPath)
workbook = xlrd.open_workbook(fileFullPath) # 打开execel
sheetNames = workbook.sheet_names()
sheet = workbook.sheet_by_name(sheetName+"|"+tableNameChinese)
rowsCount = sheet.nrows
#colsCount = sheet.ncols
for i in range(1, rowsCount):
rows = sheet.row_values(i) # 单行数据
fieldName = rows[0] #字段名
fieldDataType = rows[1] #字段数据类型
fieldLength = rows[2] #字段长度
fieldForeignTable = rows[3] #外键表名
fieldAllowEmpty = rows[4] #允许空
fieldDefault = rows[5] #默认值
fieldMemo = rows[6] #字段说明
fieldDjangoExtend = rows[7] #django额外属性
fieldIfForm = rows[8] #是否表单填写
fieldIfShowInTable = rows[9] #表单显示字段标题
fieldTitle = rows[10] #表单显示字段标题
fieldMust = rows[11] #是否必填
fieldExtendLimit = rows[12] #额外限制
fieldInputType = rows[13] #输入类型
fieldIfMulti = rows[14] #是否多值
fieldUnique = rows[15] #是否唯一
fieldDataForSelect = rows[16] #可选数据
if fieldName == "sorts":
hasSorts = "T"
if fieldName == "sellerId":
hasSellerId = "T"
if fieldIfForm == "T":
if updateFieldsStr == "":
updateFieldsStr = fieldName +"="+ fieldName
else:
updateFieldsStr += "," + fieldName + "=" + fieldName
#Q(purname__contains=keywords) | Q(purcode__contains=keywords)
if fieldDataType == "CharField":
if keywordsSearchStr == "":
keywordsSearchStr = "Q("+fieldName +"__contains=keywords)"
else:
keywordsSearchStr += " | " + "Q("+fieldName +"__contains=keywords)"
dataForSelect = ""
if fieldDataForSelect != "":
tempJson = json.loads(fieldDataForSelect)
dataForSelect = str(tempJson.get("data","")).replace("'","\"")
ac = ""
if fieldInputType in ["select"]:
ac = "选择"
elif fieldInputType in ["text","textarea","password"]:
ac = "输入"
else:
ac = "设置"
if fieldMust == "T":
if jsValidate_messages != "messages:{\n":
jsValidate_rules += ",\n"
jsValidate_rules += "\t" + fieldName + ":{\n"
jsValidate_rules += "\t\trequired:true //必填。如果验证方法不需要参数,则配置为true\n"
jsValidate_rules += "\t}"
if jsValidate_messages != "messages:{\n":
jsValidate_messages += ",\n"
jsValidate_messages += "\t" + fieldName + ":{\n"
jsValidate_messages += "\t\trequired:\"请" + ac + fieldTitle + "!\"\n"
jsValidate_messages += "\t}"
#{"data":[{"id":"3","text":"系统级"},{"id":"2","text":"商家级"},{"id":"1","text":"客服级"}]}
fieldMemoStr = "verbose_name='"
if (fieldTitle == ""):
fieldMemoStr += fieldMemo
else:
fieldMemoStr += fieldTitle
fieldMemoStr += "'"
if (fieldLength !=""):
fieldMemoStr += ",max_length="
fieldMemoStr += str(int(fieldLength))
if (fieldAllowEmpty == "T"):
fieldMemoStr += ",null=True"
if (fieldForeignTable != ""):
fieldMemoStr = fieldForeignTable + ",related_name=\""+sheetName+"_"+fieldName+"\",on_delete=models.SET_NULL" + "," + fieldMemoStr
if (fieldDjangoExtend != ""):
fieldMemoStr += "," + fieldDjangoExtend
if dataForSelect!="":
fieldMemoStr += ",help_text='{\"data\":" + dataForSelect + "}'"
else:
fieldMemoStr += ",help_text=''"
fieldInputStr = ""
if fieldInputType == "text":
fieldInputStr = "<input type='text' id='" + fieldName + "' name='" + fieldName + "' class='form-control input-width-large'>"
elif fieldInputType == "textarea":
fieldInputStr = "<textarea id='" + fieldName + "' name='" + fieldName + "' rows=5 class='form-control input-width-large'></textarea>"
elif fieldInputType == "password":
fieldInputStr = "<input type='password' id='" + fieldName + "' name='" + fieldName + "' class='form-control input-width-large'>"
elif fieldInputType == "select":
fieldInputStr = "<select id='" + fieldName + "' name='" + fieldName + "' class='form-control input-width-large'></select>"
js_init += "$(\"#" + fieldName + "\").select2({\n"
js_init += "\tplaceholder:\"请选择\",\n"
js_init += "\topenOnEnter:false,\n"
js_init += "\tminimumResultsForSearch: -1,\n"
if (dataForSelect!=""):
js_init += "\tdata:" + dataForSelect + "\n"
else:
js_init += "\tdata:[]\n"
js_init += "});\n"
js_init += "$(\"#" + fieldName + "\").val('').trigger('change');\n\n"
elif fieldInputType == "radio" or fieldInputType == "checkbox":
#print(fieldDataForSelect)
if fieldDataForSelect != "":
obj = json.loads(fieldDataForSelect)
i = 0
for o in obj["data"]:
checked = ""
#print(fieldDefault+"-"+o["id"])
if o["id"] == fieldDefault:
checked = "checked"
fieldInputStr += "<label class='radio-inline'>\n"
fieldInputStr += "\t\t\t\t\t<input type='" + fieldInputType + "' class='' id='" + fieldName+ str(i) + "' name='" + fieldName + "' value='" + o["id"] + "' " + checked + ">"
fieldInputStr += o["text"] + "\n"
fieldInputStr += "\t\t\t\t</label>"
i += 1
else:
fieldInputStr = "<label class='radio-inline'>\n"
fieldInputStr += "\t\t\t\t\t<input type='" + fieldInputType + "' class='' id='" + fieldName + "' name='" + fieldName + "' value='' checked>"
fieldInputStr += "选项一\n"
fieldInputStr += "\t\t\t\t</label>"
elif fieldInputType == "datetime":
fieldInputStr = "<input type='text' value='' id='" + fieldName + "' name='" + fieldName + "' data-date-format='yyyy-mm-dd' class='form-control input-width-small'>"
if js_init == "":
js_init += "\n"
js_init += "$(\"#" + fieldName + "\").datetimepicker({\n"
js_init += "\tcontainer:'#modalAdd .modal-content',\n"
js_init += "\tautoclose:true,\n"
js_init += "\tlanguage:'zh-CN',\n"
js_init += "\ttodayHighlight:true,\n"
js_init += "\ttodayBtn:true,\n"
js_init += "\tminView:2\n"
js_init += "});\n"
elif fieldInputType == "date":
fieldInputStr = "<input type='text' value='' id='" + fieldName + "' name='" + fieldName + "' data-date-format='yyyy-mm-dd' class='form-control input-width-small'>"
if js_init == "":
js_init += "\n"
js_init += "$(\"#" + fieldName + "\").datetimepicker({\n"
js_init += "\tcontainer:'#modalAdd .modal-content',\n"
js_init += "\tautoclose:true,\n"
js_init += "\tlanguage:'zh-CN',\n"
js_init += "\ttodayHighlight:true,\n"
js_init += "\ttodayBtn:true,\n"
js_init += "\tminView:2\n"
js_init += "});\n"
elif fieldInputType == "time":
fieldInputStr = "<input type='text' value='' id='" + fieldName + "' name='" + fieldName + "' data-date-format='yyyy-mm-dd' class='form-control input-width-small'>"
if js_init == "":
js_init += "\n"
js_init += "$(\"#" + fieldName + "\").datetimepicker({\n"
js_init += "\tcontainer:'#modalAdd .modal-content',\n"
js_init += "\tautoclose:true,\n"
js_init += "\tlanguage:'zh-CN',\n"
js_init += "\ttodayHighlight:false,\n"
js_init += "\ttodayBtn:false,\n"
js_init += "\tstartView: 1,\n"
js_init += "\tminView: 0,\n"
js_init += "\tmaxView: 1,\n"
js_init += "\tforceParse: 0\n"
js_init += "});\n"
else:
fieldInputStr = fieldName
fieldList.append({
"fieldName": fieldName,
"fieldDataType": fieldDataType,
"fieldLength": fieldLength,
"fieldForeignTable": fieldForeignTable,
"fieldAllowEmpty": fieldAllowEmpty,
"fieldDefault": fieldDefault,
"fieldMemo": fieldMemo,
"fieldDjangoExtend": fieldDjangoExtend,
"fieldIfForm": fieldIfForm,
"fieldIfShowInTable": fieldIfShowInTable,
"fieldTitle": fieldTitle,
"fieldMust": fieldMust,
"fieldExtendLimit": fieldExtendLimit,
"fieldInputType": fieldInputType,
"fieldIfMulti": fieldIfMulti,
"fieldUnique": fieldUnique,
"fieldDataForSelect": fieldDataForSelect,
"fieldMemoStr": fieldMemoStr,
"fieldInputStr": fieldInputStr,
})
jsValidate_rules += "\n},"
jsValidate_messages += "\n},"
#print(L)
#return render(request, 'system/myTool/code.html', {"data": L})
nameSpace,extend = file.split(".")
#hasSorts = "F" # 是否有sorts字段
#hasSellerId = "F" # 是否有sellerId字段
#updateFieldsStr = "" # 用户更新字段列表
#keywordsSearchStr = ""#keywords查询自动拼装(字符串型数据)
codeLeftTag = "{"
codeRightTag = "}"
objName = ""
if (sheetName!=""):
objName = sheetName[0].upper() + sheetName[1:]
strings = {"tableName":sheetName,"tableNameChinese":tableNameChinese,"objName": objName,
"nameSpace":nameSpace,"now":time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
"hasSorts":hasSorts,"hasSellerId":hasSellerId,"hasPurview":hasPurview,
"updateFieldsStr":updateFieldsStr,"keywordsSearchStr":keywordsSearchStr,"codeLeftTag":codeLeftTag,"codeRightTag":codeRightTag}
purviewQS = models.Purview.objects.filter(deletetime=None).order_by("sorts")
purviewList = []
purviewList.append({
"id": "-1",
"text": "不设置权限"
})
for purview in purviewQS:
purviewList.append({
"id":purview.purcode,
"text": purview.purname+"("+purview.purcode+")"
})
return render(request, 'system/myTool/code.html', {"data":json.dumps(L), "tableNameFull":tableNameFull, "purviewList":json.dumps(purviewList), "purviewCode":purviewCode, "strings":strings, "fieldList":fieldList, "jsValidate_rules":jsValidate_rules, "jsValidate_messages":jsValidate_messages, "js_init":js_init})
```
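For reference, the per-row column layout that `code()` expects in each design sheet, mirroring the unpacking above; workbooks live under `static/dbDesign` and each sheet is named `<tableName>|<Chinese title>`. The constant name below is only for illustration.
```python
# index -> meaning of each column in a design-sheet data row (header row 0 is skipped)
DESIGN_COLUMNS = [
    'fieldName', 'fieldDataType', 'fieldLength', 'fieldForeignTable', 'fieldAllowEmpty',
    'fieldDefault', 'fieldMemo', 'fieldDjangoExtend', 'fieldIfForm', 'fieldIfShowInTable',
    'fieldTitle', 'fieldMust', 'fieldExtendLimit', 'fieldInputType', 'fieldIfMulti',
    'fieldUnique', 'fieldDataForSelect',
]
```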
#### File: system/views/viewsExcel.py
```python
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.shortcuts import HttpResponseRedirect
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from django.db import transaction
from django.db import connection
import os
import json
import xlrd
import xlwt
import xlutils
import datetime
import random
import time
from system import models
#返回文件扩展名
def file_extension(path):
return os.path.splitext(path)[1]
#执行sql返回影响行数或记录数
def executeSql(sqlStr):
with connection.cursor() as cursor:
cursor.execute(sqlStr)
row = cursor.fetchone()
return row
def excel(request):
return render(request, 'system/myTool/excel.html')
@csrf_exempt
def ajaxUpload(request):
if request.method == "POST": # 请求方法为POST时,进行处理
myFile = request.FILES.get("file", None) # 获取上传的文件,如果没有文件,则默认为None
tableNameStr =request.POST.get("tableName","")
if tableNameStr!="":
tableNameStr = "temp" + tableNameStr.capitalize()
if not myFile:
return JsonResponse({"result":"no files for upload!"})
os.getcwd()
preFolder = os.path.abspath(os.path.join(os.getcwd(), ""))
filenameNew = datetime.datetime.now().strftime('%Y%m%d%H%M%S') + str(random.randint(0, 999999))#文件重命名
filenameNew += file_extension(myFile.name)
destination = open(os.path.join(preFolder+"/static/upload", filenameNew), 'wb+') # 打开特定的文件进行二进制的写操作
for chunk in myFile.chunks(): # 分块写入文件
destination.write(chunk)
destination.close()
obj = models.Excel_import_file_main.objects.create()
obj.filenameOriginal=myFile.name
obj.filenameSaved = filenameNew
obj.tableName = tableNameStr
obj.save()
return JsonResponse({"result":"upload over!"})
@csrf_exempt
def ajaxSetTableName(request):
data = request.POST
count = 0
#print(data)
if data!="":
id = data["id"]
tableName = data["tableName"]
# jsonStr = str(data.dict()).replace('\'', '\"')
# obj = json.loads(jsonStr)
# id = obj["id"]
# tableName = obj["tableName"]
tableName = "temp" + tableName.capitalize()
count = models.Excel_import_file_main.objects.filter(id=id).update(tableName=tableName)
return HttpResponse(count)
else:
return HttpResponse("0")
#获取excel列表名(字段名)
@csrf_exempt
def ajaxGetTableFields(request):
data = request.POST
id = data["id"]
if id!="":
model = models.Excel_import_file_main.objects.filter(id=id)[0]
if model:
filenameSaved = model.filenameSaved
folder = os.path.abspath(os.path.join(os.getcwd(), ""))
filepathFull = folder + '/static/upload/' + filenameSaved
workbook = xlrd.open_workbook(filepathFull)
# xlrd usage notes:
#   workbook.sheet_names() / sheet_by_index(i) / sheet_by_name(name) select a sheet
#   sheet.name, sheet.nrows, sheet.ncols; sheet.row_values(r) / sheet.col_values(c) read data
#   date cells have ctype == 3; convert with xlrd.xldate_as_tuple(cell_value, workbook.datemode)
sheet = workbook.sheet_by_index(0)
rows = sheet.row_values(0)#原始列名
#数据类型
colTypeNameList = ""
colsCount = sheet.ncols
i = 0
d = {"0": "empty", "1": "string", "2": "number", "3": "date", "4": "boolean", "5": "error"}
while i < colsCount:
colType = str(sheet.cell(1, i).ctype)
colTypeName = d[colType]
#print(colType)
#print(colTypeName)
if colTypeNameList == "":
colTypeNameList = colTypeName
else:
colTypeNameList += "|"+colTypeName
i += 1
return JsonResponse({"colsList":'|'.join(rows),"colTypeNameList":colTypeNameList})
#保存字段名
@csrf_exempt
def ajaxSaveTableFields(request):
data = request.POST
excelImportFileMainId = data["excelImportFileMainId"]
fieldCount = data["fieldCount"]
i = 0
try:
with transaction.atomic():
while i < int(fieldCount):
fieldNameNew = data["fieldNameNew" + str(i)]
fieldNameOriginal = data["fieldNameOriginal" + str(i)]
colTypeName = data["colTypeName" + str(i)]
modelList = models.Excel_import_file_fields_name.objects.filter(
excelImportFileMainId=excelImportFileMainId, fieldSn=i)
if modelList:
model = modelList[0]
models.Excel_import_file_fields_name.objects.filter(id=model.id).update(fieldNameNew=fieldNameNew)
else:
obj = models.Excel_import_file_fields_name.objects.create()
obj.excelImportFileMainId = models.Excel_import_file_main.objects.filter(id=excelImportFileMainId)[0]
obj.fieldSn = i
obj.fieldNameOriginal = fieldNameOriginal
obj.fieldNameNew = fieldNameNew
obj.colType = colTypeName
obj.save()
i += 1
models.Excel_import_file_main.objects.filter(id=excelImportFileMainId).update(setFieldNameTime=datetime.datetime.now())
except Exception as e:
print(e)
return HttpResponse("执行出现错误!")
return HttpResponse("T")
def excel_import_file_main_list(request):
keywords = request.GET.get('keywords','')
#sortName = tableName & sortOrder = desc
sortName = request.GET.get('sortName','')
sortOrder = request.GET.get('sortOrder','')
orderBy = "-id"
if sortName != "":
if sortOrder == "" or sortOrder == "asc":
orderBy = sortName
else:
orderBy = "-"+sortName
'''服务端分页时,前端需要传回:limit(每页需要显示的数据量),offset(分页时 数据的偏移量,即第几页)'''
'''mysql 利用 limit语法 进行分页查询'''
'''服务端分页时,需要返回:total(数据总量),rows(每行数据) 如: {"total": total, "rows": []}'''
pageIndex = request.GET.get('pageIndex',1)
pageSize = request.GET.get('pageSize',20)
#print(pageIndex)
start = (int(pageIndex)-1) * int(pageSize)
end = int(pageIndex) * int(pageSize)
# 符合条件的总记录数
if keywords == "":
total = models.Excel_import_file_main.objects.all().count()
results = models.Excel_import_file_main.objects.order_by(orderBy).all()[start:end]
else:
total = models.Excel_import_file_main.objects.filter(filenameOriginal__contains=keywords).count()
results = models.Excel_import_file_main.objects.filter(filenameOriginal__contains=keywords).order_by(orderBy).all()[start:end]
returnData = {"total": total, "rows": []} #########非常重要############
for result in results:
#加载默认字段名列表
fields = models.Excel_import_file_fields_name.objects.filter(excelImportFileMainId=result.id).order_by("fieldSn")
defaultFieldsName = ""
if fields:
for f in fields:
if (defaultFieldsName == ""):
defaultFieldsName += f.fieldNameNew
else:
defaultFieldsName += "|"+f.fieldNameNew
returnData['rows'].append({
"id": result.id,
"filenameOriginal": result.filenameOriginal,
"filenameSaved": result.filenameSaved,
"tableName": result.tableName,
#"addTime": time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime(result.addTime)),
"addTime": str(result.addTime),
"adder": result.adder,
"setFieldNameTime": str(result.setFieldNameTime),
"setFieldNamer": result.setFieldNamer,
"importTime": str(result.importTime),
"importer": result.importer,
"defaultFieldsName":defaultFieldsName,
#"PurchaseTime": time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime(results['purchasetime'])),
# 将 时间戳 转换为 UTC时间
})
# 最后用dumps包装下,json.dumps({"rows": [{"gameorderid": 1}, {"gameorderid": 22}]})
return HttpResponse(json.dumps(returnData))
#excel预览
def excel_import_file_preview(request):
returnData = {} #########非常重要############
id = request.GET.get("id","")
if id != "":
model = models.Excel_import_file_main.objects.filter(id=id)[0]
if model:
filenameSaved = model.filenameSaved
folder = os.path.abspath(os.path.join(os.getcwd(), ""))
filepathFull = folder + '/static/upload/' + filenameSaved
workbook = xlrd.open_workbook(filepathFull)
# (xlrd usage notes: see ajaxGetTableFields above)
sheet = workbook.sheet_by_index(0)
rows = sheet.row_values(0) # 原始列名
rowsCount = sheet.nrows
colsCount = sheet.ncols
#初始化字段名
fields = models.Excel_import_file_fields_name.objects.filter(excelImportFileMainId=id).order_by("fieldSn")
returnData = {"total": rowsCount-1, "rows": []}
pageIndex = request.GET.get('pageIndex', 1)
pageSize = request.GET.get('pageSize', 20)
# print(pageIndex)
start = (int(pageIndex) - 1) * int(pageSize)
end = int(pageIndex) * int(pageSize)
if end+1 <= rowsCount:
end += 1
else:
end = rowsCount
for i in range(start+1,end):
tempDict = {}
if fields:
for j,f in enumerate(fields):
value = str(sheet.cell(i, j).value)
if f.colType == "number":
value = round(float(value),2)
#print(i,j,value)
tempDict["field"+str(f.id)] = value
#print("------------")
returnData['rows'].append(tempDict)
# 最后用dumps包装下,json.dumps({"rows": [{"gameorderid": 1}, {"gameorderid": 22}]})
return HttpResponse(json.dumps(returnData))
@csrf_exempt
def excel_import_file_main_del(request):
data = request.POST
idsForDelete = data["idsForDelete"]
try:
with transaction.atomic():
if idsForDelete != "":
idsList = idsForDelete.split(",")
for id in idsList:
model = models.Excel_import_file_main.objects.filter(id=id)[0]
if model:
tableName = model.tableName
filenameSaved = model.filenameSaved
#删除表
sql = "SELECT count(*) as acount FROM information_schema.TABLES WHERE table_name ='" + model.tableName + "'"
row = executeSql(sql)
if int(row[0]) > 0:
sql = "drop table " + tableName
executeSql(sql)
#删除文件
folder = os.path.abspath(os.path.join(os.getcwd(), ""))
filepathFull = folder + '/static/upload/' + filenameSaved
if os.path.exists(filepathFull):
os.remove(filepathFull)
#删除记录
models.Excel_import_file_main.objects.filter(id=id).delete()
except Exception as e:
print(e)
return HttpResponse("执行出现错误!")
return HttpResponse("T")
@csrf_exempt
def ajaxExcelImport(request):
data = request.POST
id = data["id"]
try:
with transaction.atomic():
if id != "":
model = models.Excel_import_file_main.objects.filter(id=id)[0]
if model:
if model.importTime != None:
return HttpResponse("已导入!")
filenameSaved = model.filenameSaved
folder = os.path.abspath(os.path.join(os.getcwd(), ""))
filepathFull = folder + '/static/upload/' + filenameSaved
workbook = xlrd.open_workbook(filepathFull)
# sheet1Nname = workbook.sheet_names()[1]# get a sheet name by its index
# get a sheet by index or by name, along with its name, row count and column count
# sheet2 = workbook.sheet_by_index(1)
# sheet2 = workbook.sheet_by_name('Sheet2')
# print(sheet2.name, sheet2.nrows, sheet2.ncols)
# get whole-row and whole-column values from a sheet by name
# sheet2 = workbook.sheet_by_name('Sheet2')
# rows = sheet2.row_values(3)
# cols = sheet2.col_values(2)
# print rows
# print cols
# if the script needs to read and display a date-typed cell, first check the cell type: when ctype equals 3, treat the value as a date:
# if (sheet.cell(row, col).ctype == 3):
# date_value = xlrd.xldate_as_tuple(sheet.cell_value(row, col), book.datemode)
# date_tmp = date(*date_value[:3]).strftime('%Y/%m/%d')
sheet = workbook.sheet_by_index(0)
rows = sheet.row_values(0) # 原始列名
rowsCount = sheet.nrows
colsCount = sheet.ncols
# map Excel cell types to database column types
dataType = {"empty": "varchar(500)", "string": "varchar(500)", "number": "DOUBLE", "date": "datetime",
"boolean": "varchar(50)", "error": "varchar(500)"}
# CREATE TABLE statement
sqlCreateTable = "CREATE TABLE " + model.tableName + "(id INT PRIMARY KEY AUTO_INCREMENT"
# initialise field name definitions
fields = models.Excel_import_file_fields_name.objects.filter(excelImportFileMainId=id).order_by("fieldSn")
fieldList = []
if fields:
for f in fields:
fieldList.append(f.fieldNameNew)
if f.fieldNameNew != "":
sqlCreateTable += "," + f.fieldNameNew + " " + dataType[f.colType]
sqlCreateTable += ")"
sql = "SELECT count(*) as acount FROM information_schema.TABLES WHERE table_name ='" + model.tableName + "'"
row = executeSql(sql)
if int(row[0]) == 0:
print(sqlCreateTable)
executeSql(sqlCreateTable)
insertSql = ""
for i in range(1, rowsCount):
fieldsList = ""
valuesList = ""
if fields:
for j, f in enumerate(fields):
value = str(sheet.cell(i, j).value)
if f.colType == "number":
if str(value) == '':
value=0
else:
value = round(float(value), 2)
#print(i, j, value)
if fieldsList == "":
fieldsList = f.fieldNameNew
valuesList = "'"+str(value).replace("'","''")+"'"
else:
fieldsList = fieldsList + "," + f.fieldNameNew
valuesList = valuesList + ",'" + str(value).replace("'","''") + "'"
#if insertSql == "":
insertSql = "insert into " + model.tableName + " (" + fieldsList + ") values (" + valuesList + ")"
print(insertSql)
executeSql(insertSql)
#else:
# insertSql += ";insert into " + model.tableName + " (" + fieldsList + ") values (" + valuesList + ")"
print("------------")
print(insertSql)
models.Excel_import_file_main.objects.filter(id=id).update(importTime=datetime.datetime.now())
except Exception as e:
print(e)
return HttpResponse("执行出现错误!")
return HttpResponse("T")
def excelPreviewImport(request):
id = request.GET.get("id","")
print('id', id)
objList = models.Excel_import_file_main.objects.filter(id=id)
if objList:
obj = objList[0]
objFieldList = models.Excel_import_file_fields_name.objects.filter(excelImportFileMainId=id)
return render(request, "system/myTool/excelPreviewImport.html", {"obj":obj, "objFieldList":objFieldList})
@csrf_exempt
def ajaxExecSql(request):
data = request.POST
sqlStr = data["sqlStr"]
if sqlStr != "":
cursor = connection.cursor()
cursor.execute(sqlStr)
#row = cursor.fetchone() # 返回结果行 或使用 #rows = cursor.fetchall()
rows = cursor.fetchall()
#print(rows)
workbook = xlwt.Workbook(encoding='utf-8')
booksheet = workbook.add_sheet('Sheet1', cell_overwrite_ok=True)
workbook.add_sheet('Sheet2')
for i,row in enumerate(rows):
for j,col in enumerate(row):
print(col)
booksheet.write(i, j, str(col))
#print("---------")
folder = os.path.abspath(os.path.join(os.getcwd(), ""))
filepathFull = folder + '/static/download/' + 'tempExcel.xls'
workbook.save(filepathFull)
return HttpResponse("tempExcel.xls")
```
#### File: system/views/viewsInformType.py
```python
from django.shortcuts import render
from django.shortcuts import HttpResponse
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from datetime import datetime
from django.db.models import Q
import json
from system import models
from system import helpClass
from system.helpClass import login_required
from django.db import transaction
import traceback
purCodeMain = "0008"
@login_required
def index(request):
purviewList = request.session.get('purviewList', []) # permission list
purCode = "V" + purCodeMain
if purCode in purviewList:
allowV = "T"
else:
allowV = "F"
purviewA = "F"
purviewE = "F"
purviewD = "F"
if "A" + purCodeMain in purviewList:
purviewA = "T"
if "E" + purCodeMain in purviewList:
purviewE = "T"
if "D" + purCodeMain in purviewList:
purviewD = "T"
return render(request, "system/informType/index.html", locals())
# save data (add or update)
@csrf_exempt
def ajaxSaveInformType(request):
data = request.POST
# print(data)
try:
with transaction.atomic():
if data != "":
informName = data["informName"]
informTag = data["informTag"]
informInfo = data["informInfo"]
condition = data["condition"]
codePosition = data["codePosition"]
paramMemo = data["paramMemo"]
memo = data["memo"]
loginInfo = request.session.get('loginInfo', "")
adder = loginInfo["userId"]
sellerId = loginInfo["sellerId"]
id = data["id"]
if id == "":
# check whether the user has permission for this operation
purviewList = request.session.get('purviewList', []) # permission list
purCode = "A" + purCodeMain
if purCode not in purviewList:
scription = "对不起!您<span style='color:red'>没有权限</span>添加此数据(" + purCode + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
# uniqueness check (does the record already exist?)
model = models.InformType.objects.filter(informName=informName, deletetime=None)
if model:
scription = "消息名称已经存在(" + informName + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
model = models.InformType.objects.filter(informTag=informTag, deletetime=None)
if model:
scription = "消息标志已经存在(" + informTag + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
obj = models.InformType.objects.create()
obj.informName = informName
obj.informTag = informTag
obj.informInfo = informInfo
obj.condition = condition
obj.codePosition = codePosition
obj.paramMemo = paramMemo
obj.memo = memo
obj.adder = models.User.objects.filter(id=adder)[0]
count = obj.save()
else:
# check whether the user has permission for this operation
purviewList = request.session.get('purviewList', []) # permission list
purCode = "E" + purCodeMain
if purCode not in purviewList:
scription = "对不起!您<span style='color:red'>没有权限</span>修改此数据(" + purCode + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
# uniqueness check (does the record already exist?)
model = models.InformType.objects.exclude(id=id).filter(informName=informName, deletetime=None)
if model:
scription = "消息名称已经存在(" + informName + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
model = models.InformType.objects.exclude(id=id).filter(informTag=informTag, deletetime=None)
if model:
scription = "消息标志已经存在(" + informTag + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
count = models.InformType.objects.filter(id=id).update(informName=informName, informTag=informTag,
informInfo=informInfo, condition=condition,
codePosition=codePosition,
paramMemo=paramMemo, memo=memo,
updatetime=datetime.now(),
updater=models.User.objects.filter(id=adder)[0])
scription = "操作成功,影响行数:" + str(count)
return JsonResponse({"result": "T", "scription": scription, "extendInfo": "你的extendInfo"})
else:
scription = "提交参数错误!"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
except Exception as e:
# print(e)
traceback.print_exc()
scription = "执行时发生异常!(Exception):" + traceback.format_exc()
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
# delete data
@csrf_exempt
def ajaxDelInformType(request):
try:
with transaction.atomic():
# check whether the user has permission for this operation
purviewList = request.session.get('purviewList', []) # permission list
purCode = "D" + purCodeMain
if purCode not in purviewList:
scription = "对不起!您<span style='color:red'>没有权限</span>删除此数据(" + purCode + ")"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
idsForDelete = request.POST.get("idsForDelete", "")
if idsForDelete != "":
idsList = idsForDelete.strip(',').split(',')
loginInfo = request.session.get('loginInfo', "")
deleter = loginInfo["userId"]
count = models.InformType.objects.filter(id__in=idsList).update(deletetime=datetime.now(), deleter=
models.User.objects.filter(id=deleter)[0])
scription = "成功删除 " + str(count) + " 条数据!"
return JsonResponse({"result": "T", "scription": scription, "extendInfo": "你的extendInfo"})
except Exception as e:
# print(e)
traceback.print_exc()
scription = "执行时发生异常!(Exception):" + traceback.format_exc()
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
# gridData
@csrf_exempt
def gridDataInformType(request):
keywords = request.GET.get('keywords', '')
# sortName = tableName & sortOrder = desc
sortName = request.GET.get('sortName', '')
sortOrder = request.GET.get('sortOrder', '')
orderBy = "-id"
if sortName != "":
if sortOrder == "" or sortOrder == "asc":
orderBy = sortName
else:
orderBy = "-" + sortName
'''for server-side paging the client must send back: limit (number of rows per page) and offset (the data offset, i.e. which page)'''
'''MySQL pages the query with its LIMIT syntax'''
'''server-side paging must return: total (total row count) and rows (the row data), e.g. {"total": total, "rows": []}'''
pageIndex = request.GET.get('pageIndex', 1)
pageSize = request.GET.get('pageSize', 20)
# print(pageIndex)
start = (int(pageIndex) - 1) * int(pageSize)
end = int(pageIndex) * int(pageSize)
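# worked example: pageIndex=3 and pageSize=20 give start=40 and end=60,
# so the queryset slice records[start:end] below is the SQL equivalent of LIMIT 20 OFFSET 40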
loginInfo = request.session.get('loginInfo', "")
if loginInfo == "":
returnData = {"total": -1, "errorInfo": "登录超时,请重新登录!!!", "rows": []}
return HttpResponse(json.dumps(returnData))
records = models.InformType.objects.order_by(orderBy).filter(deletetime=None)
if keywords != "":
records = records.filter(
Q(informName__contains=keywords) | Q(informTag__contains=keywords) | Q(informInfo__contains=keywords) | Q(
condition__contains=keywords) | Q(codePosition__contains=keywords) | Q(
paramMemo__contains=keywords) | Q(memo__contains=keywords))
total = records.count()
results = records[start:end]
adder = loginInfo["userId"]
sellerId = loginInfo["sellerIdAll"]
returnData = {"total": total, "rows": []} #########非常重要############
for result in results:
adder = 0
updater = 0
deleter = 0
adderName = ""
updaterName = ""
deleterName = ""
if result.adder != None:
adder = result.adder.id
adderName = result.adder.username
if result.updater != None:
updater = result.updater.id
updaterName = result.updater.username
if result.deleter != None:
deleter = result.deleter.id
deleterName = result.deleter.username
targetUserList=""
targetUserNameList = ""
targetDeptList=""
if sellerId>0:
# load the seller's system-message recipients (users)
informTargetUsers = models.InformTargetUser.objects.filter(deletetime=None,sellerId=sellerId,informTypeId=result.id)
if informTargetUsers.count()>0:
for informTargetUser in informTargetUsers:
if targetUserList == "":
targetUserList = str(informTargetUser.userId.id)
targetUserNameList = informTargetUser.userId.username
else:
targetUserList = targetUserList + "," + str(informTargetUser.userId.id)
targetUserNameList = targetUserNameList + "," + informTargetUser.userId.username
# load the seller's system-message recipients (departments)
informTargetDepts = models.InformTargetDept.objects.filter(deletetime=None,sellerId=sellerId,informTypeId=result.id)
if informTargetDepts.count()>0:
for informTargetDept in informTargetDepts:
if targetDeptList == "":
targetDeptList = informTargetDept.deptName+":"+informTargetDept.positionList
else:
targetDeptList = targetDeptList + "|" + informTargetDept.deptName+":"+informTargetDept.positionList
returnData['rows'].append({
"id": result.id,
"informName": str(result.informName),
"informTag": str(result.informTag),
"informInfo": str(result.informInfo),
"condition": str(result.condition),
"codePosition": str(result.codePosition),
"paramMemo": str(result.paramMemo),
"memo": str(result.memo),
"addtime": str(result.addtime),
"updatetime": str(result.updatetime),
"deletetime": str(result.deletetime),
"adder": adder,
"updater": updater,
"deleter": deleter,
"adderName": adderName,
"updaterName": updaterName,
"deleterName": deleterName,
"targetUserList":targetUserList,
"targetUserNameList":targetUserNameList,
"targetDeptList":targetDeptList,
# "addTime": time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime(result.addTime)),
# "PurchaseTime": time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime(results['purchasetime'])),
# convert the timestamp to UTC time
})
# finally wrap it with json.dumps, e.g. json.dumps({"rows": [{"gameorderid": 1}, {"gameorderid": 22}]})
return HttpResponse(json.dumps(returnData))
# additional helper methods below
# seller configures the recipients of system messages
def indexTarget(request):
# request.POST
# request.GET
# return HttpResponse("Hello World!")
# return render(request,"index.html")
loginInfo = request.session.get('loginInfo', "")
if loginInfo == "":
return render(request, "system/login/login.html", locals())
purCodeMain = "0011"
purviewList = request.session.get('purviewList', []) # permission list
purCode = "V" + purCodeMain
if purCode in purviewList:
allowV = "T"
else:
allowV = "F"
purviewA = "F"
purviewE = "F"
purviewD = "F"
if "A" + purCodeMain in purviewList:
purviewA = "T"
if "E" + purCodeMain in purviewList:
purviewE = "T"
if "D" + purCodeMain in purviewList:
purviewD = "T"
loginInfo = request.session.get('loginInfo', "")
adder = loginInfo["userId"]
sellerId = loginInfo["sellerIdAll"]
# get users
condition = {"parentSellerId":sellerId}
userList = helpClass.getUserForSelect(**condition)
userJson = json.dumps(userList)
# get positions
positionList = helpClass.getDicMainListForSelect(sellerId,"position")
positionJson = json.dumps(positionList)
# get departments
departmentList = helpClass.getDicMainListForSelect(sellerId, "department")
return render(request, "system/informTarget/index.html", locals())
# submit the system-message recipient settings
@csrf_exempt
def setInformTarget(request):
data = request.POST
# print(data)
try:
with transaction.atomic():
if data != "":
loginInfo = request.session.get('loginInfo', "")
adder = loginInfo["userId"]
sellerId = loginInfo["sellerIdAll"]
informTypeId = data["informTypeId"]
targetUserListStr = data["targetUserList"]
departmentCount = data["departmentCount"]
# begin: set the departments that receive system messages
deptNameList = []
i = 0
while(i < int(departmentCount)):
deptName = data["dept"+str(i)]
position = data["position"+str(i)]
# if a department was ticked
if deptName!="":
# check whether a record already exists; add it if not, otherwise update it
records = models.InformTargetDept.objects.filter(deletetime=None,sellerId=sellerId,informTypeId=informTypeId,deptName=deptName)
if records.count()>0:
models.InformTargetDept.objects.filter(deletetime=None, sellerId=sellerId,
informTypeId=informTypeId, deptName=deptName).update(positionList=position,
updatetime=datetime.now(),
updater=models.User.objects.filter(id=adder)[0])
else:
obj = models.InformTargetDept.objects.create()
obj.sellerId = models.Seller.objects.get(id=sellerId)
obj.informTypeId = models.InformType.objects.get(id=informTypeId)
obj.deptName = deptName
obj.positionList = position
obj.adder = models.User.objects.get(id=adder)
obj.save()
deptNameList.append(deptName)
i = i+1
# departments that are no longer ticked: soft-delete their records
models.InformTargetDept.objects.exclude(deptName__in=deptNameList).filter(deletetime=None,
sellerId=sellerId,
informTypeId=informTypeId).update(deletetime=datetime.now(), deleter=models.User.objects.filter(id=adder)[0])
# end: set the departments that receive system messages
# begin: set the users that receive system messages
# 1. soft-delete the existing records
models.InformTargetUser.objects.filter(deletetime=None,sellerId=sellerId,informTypeId=informTypeId).update(deletetime=datetime.now(), deleter=models.User.objects.filter(id=adder)[0])
# 2. add the new records
if targetUserListStr!="":
targetUserList = targetUserListStr.split(",")
for userId in targetUserList:
obj = models.InformTargetUser.objects.create()
obj.sellerId = models.Seller.objects.get(id=sellerId)
obj.informTypeId = models.InformType.objects.get(id=informTypeId)
obj.userId = models.User.objects.get(id=userId)
obj.adder = models.User.objects.get(id=adder)
obj.save()
# end: set the users that receive system messages
scription = "操作成功!"
return JsonResponse({"result": "T", "scription": scription, "extendInfo": "你的extendInfo"})
else:
scription = "提交参数错误!"
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
except Exception as e:
# print(e)
traceback.print_exc()
scription = "执行时发生异常!(Exception):" + traceback.format_exc()
return JsonResponse({"result": "F", "scription": scription, "extendInfo": "你的extendInfo"})
``` |
{
"source": "17628279559/highSQL_private",
"score": 3
} |
#### File: python/support/picture_download.py
```python
import requests
import appbk_sql
from lxml import etree
from urllib import request
import time
import re
path = r'D:\\img\\'
path_imdb = r'E:\\img_imdb\\'
#from httplib2 import socks # proxy settings for downloading images
#import socket
#socks.setdefaultproxy(socks.PROXY_TYPE_HTTP, "127.0.0.1", 1080)
#socket.socket = socks.socksocket
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}
# fetch every movie id and picture link from MySQL
def get_movieid_and_picture():
sql = "select movieid,picture from movies where picture is not null and pic is null;"
result = appbk_sql.mysql_com(sql)
data = []
for i in result:
data.append([i['movieid'],i['picture']])
return data
# download the pictures, save them locally and update the database: writing a placeholder path into the pic column marks the movie as already downloaded
def function_download():
data = get_movieid_and_picture()
opener = request.build_opener()
opener.addheaders = ([("User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"),])
request.install_opener(opener)
for item in data:
try:
print(item)
request.urlretrieve(item[1],path+str(item[0])+'.jpg')
sql = "UPDATE movies SET pic='{}' WHERE movieid = {}".format(str(item[0])+'.jpg',item[0])
appbk_sql.mysql_com(sql)
except Exception as e:
print(e)
# fetch every movie id and imdbid from MySQL
def get_id_link():
sql = "SELECT a.movieid,imdbid FROM `links` a,`movies` b where a.movieid = b.movieid and pic_imdb is null;"
result = appbk_sql.mysql_com(sql)
data = []
for i in result:
data.append([i['movieid'],i['imdbid']])
return data
# download the pictures, save them locally and update the database: writing a placeholder path into the pic_imdb column marks the movie as already downloaded
def function_download_picture_from_imdb():
url_imdb="https://www.imdb.com/title/tt0{}/?ref_=fn_al_tt_1"
opener = request.build_opener()
opener.addheaders = ([("User-Agent","Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"),])
request.install_opener(opener)
for item in get_id_link():
try:
data = requests.get(url_imdb.format(str(item[1])),headers=headers)
except Exception as e:
print(item[0],"下载出现问题",e)
html=etree.HTML(data.text)
data.close()
try:
imdb_picture_url = html.xpath(r'//div[@class="ipc-media ipc-media--poster-27x40 ipc-image-media-ratio--poster-27x40 ipc-media--baseAlt ipc-media--poster-l ipc-poster__poster-image ipc-media__img"]/img/@src')[0]
except Exception as e:
print(item[0],"xPath有问题",e)
try:
request.urlretrieve(imdb_picture_url,path_imdb+str(item[0])+'.jpg')
sql = "UPDATE movies SET pic_imdb='{}' WHERE movieid = {}".format(str(item[0])+'.jpg',item[0])
print(item[0],"下载成功")
appbk_sql.mysql_com(sql)
except Exception as e:
print(item[0],"下载有问题",e)
if __name__ == '__main__':
#function_download()
#function_download_picture_from_imdb()
pass
``` |
{
"source": "17701253801/firefly-proxy",
"score": 2
} |
#### File: DEPRECATED_PYTHON_SRC/component/_ui_mac.py
```python
from lib.ipc import ActorObject
class UI(ActorObject):
def __init__(self, coordinator):
super(UI, self).__init__()
self.coordinator = coordinator
def run(self):
try:
# due to some bugs, the import must go after Process.start
# see this link: http://stackoverflow.com/questions/21143866/python-tkinter-application-causes-fork-exec-error-on-mac-os-x
self.start_actor()
from component._ui_mac_app import FireflyApp
app = FireflyApp(self.coordinator)
app.run()
self.quit_actor()
except Exception, e:
print e
```
#### File: DEPRECATED_PYTHON_SRC/component/_ui_win.py
```python
import os
from lib.ipc import ActorProcess
from lib.systray import SysTrayIcon
from lib.utils import init_logging
class UI(ActorProcess):
def __init__(self, coordinator):
super(UI, self).__init__()
self.coordinator = coordinator
def systray_quit(self, systray_ref):
pass
def systray_launch_browser(self, systray_ref):
self.coordinator.IPC_launch_browser()
def systray_open_webadmin(self, systray_ref):
self.coordinator.IPC_open_admin_url()
def run(self):
init_logging()
self.start_actor()
rootdir = self.coordinator.get('rootdir')
confdata = self.coordinator.get('confdata')
icon = os.path.join(rootdir, confdata['icon_path'])
SysTrayIcon(
icon,
u'萤火虫翻墙代理',
(
(u'翻墙浏览', None, self.systray_launch_browser),
(u'配置代理', None, self.systray_open_webadmin),
(u'退出', None, 'QUIT')
),
on_quit=self.systray_quit,
default_menu_index=1,
)
self.quit_actor()
```
#### File: DEPRECATED_PYTHON_SRC/gsocks/msg.py
```python
import os
import struct
from gevent import socket
import dpkt
if os.name == 'nt':
import win_inet_pton
socket.inet_pton = win_inet_pton.inet_pton
socket.inet_ntop = win_inet_pton.inet_ntop
# version
SOCKS5 = '\x05'
# reserve
RSV = '\x00'
# command
CONNECT = '\x01'
BIND = '\x02'
UDP_ASSOCIATE = '\x03'
# methods
NO_AUTHENTICATION_REQUIRED = '\x00'
NO_ACCEPTABLE_METHODS = '\xff'
# address type
IP_V4 = '\x01'
DOMAIN_NAME = '\x03'
IP_V6 = '\x04'
# reply
SUCCEEDED = '\x00'
GENERAL_SOCKS_SERVER_FAILURE = '\x01'
CONNECT_NOT_ALLOWED = '\x02'
NETWORK_UNREACHABLE = '\x03'
CONNECTION_REFUSED = '\x04'
TTL_EXPIRED = '\x06'
CMD_NOT_SUPPORTED = '\x07'
ADDR_TYPE_NOT_SUPPORTED = '\x08'
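# For reference (RFC 1928), a CONNECT request for example.com:80 is laid out as
#   VER  CMD  RSV  ATYP  DST.ADDR               DST.PORT
#   \x05 \x01 \x00 \x03  \x0b + 'example.com'   \x00\x50
# domain addresses are length-prefixed; pack_addr/unpack_addr below implement exactly that encoding.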
def pack_addr(addrtype, addr):
if addrtype == IP_V4:
s = socket.inet_pton(socket.AF_INET, addr) # @UndefinedVariable
elif addrtype == IP_V6:
s = socket.inet_pton(socket.AF_INET6, addr) # @UndefinedVariable
elif addrtype == DOMAIN_NAME:
s = struct.pack('B', len(addr))
s += addr
else:
raise dpkt.PackError("Unknown address type %s" % addrtype.encode('hex'))
return s
def unpack_addr(addrtype, buf, offset):
if addrtype == IP_V4:
addr = socket.inet_ntop(socket.AF_INET, buf[offset:(offset+4)]) # @UndefinedVariable
nxt = offset+4
elif addrtype == IP_V6:
addr = socket.inet_ntop(socket.AF_INET6, buf[offset:(offset+16)]) # @UndefinedVariable
nxt = offset+16
elif addrtype == DOMAIN_NAME:
length = struct.unpack('B', buf[offset])[0]
addr = buf[(offset+1):(offset+1+length)]
nxt = offset+1+length
else:
raise dpkt.UnpackError("Unknown address type %s" % addrtype.encode('hex'))
return addr, nxt
class InitRequest(dpkt.Packet):
__hdr__ = (
('version', 'c', SOCKS5),
('nmethods', 'B', 1),
('methods', 's', NO_AUTHENTICATION_REQUIRED),
)
def pack(self):
return self.version + struct.pack('B', self.nmethods) + self.methods
def unpack(self, buf):
self.version = buf[0]
self.nmethods = struct.unpack('B', buf[1])[0]
self.methods = buf[2:(2+self.nmethods)]
class InitReply(dpkt.Packet):
__hdr__ = (
('version', 'c', SOCKS5),
('method', 'c', NO_AUTHENTICATION_REQUIRED),
)
class Request(dpkt.Packet):
__hdr__ = (
('version', 'c', SOCKS5),
('cmd', 'c', CONNECT),
('rsv', 'c', RSV),
('addrtype', 'c', IP_V4),
('dstaddr', 's', ''),
('dstport', 'H', 0x3003),
)
def pack(self):
addr = pack_addr(self.addrtype, self.dstaddr)
return self.version + self.cmd + self.rsv + \
self.addrtype + addr + struct.pack('!H', self.dstport)
def unpack(self, buf):
self.version = buf[0]
self.cmd = buf[1]
self.rsv = buf[2]
self.addrtype = buf[3]
self.dstaddr, offset = unpack_addr(self.addrtype, buf, 4)
self.dstport = struct.unpack('!H', buf[offset:(offset+2)])[0]
class Reply(dpkt.Packet):
__hdr__ = (
('version', 'c', SOCKS5),
('rep', 'c', SUCCEEDED),
('rsv', 'c', RSV),
('addrtype', 'c', IP_V4),
('bndaddr', 's', ''),
('bndport', 'H', 0x3003),
)
def pack(self):
addr = pack_addr(self.addrtype, self.bndaddr)
return self.version + self.rep + self.rsv + \
self.addrtype + addr + struct.pack('!H', self.bndport)
def unpack(self, buf):
self.version = buf[0]
self.rep = buf[1]
self.rsv = buf[2]
self.addrtype = buf[3]
self.bndaddr, offset = unpack_addr(self.addrtype, buf, 4)
self.bndport = struct.unpack('!H', buf[offset:(offset+2)])[0]
class UDPRequest(dpkt.Packet):
__hdr__ = (
('rsv', '2s', RSV+RSV),
('frag', 'c', '\x00'),
('addrtype', 'c', IP_V4),
('dstaddr', 's', ''),
('dstport', 'H', 0x3003),
)
def pack(self):
addr = pack_addr(self.addrtype, self.dstaddr)
return self.rsv + self.frag + self.addrtype \
+ addr + struct.pack('!H', self.dstport) + self.data
def unpack(self, buf):
self.rsv = buf[0:2]
self.frag = buf[2]
self.addrtype = buf[3]
self.dstaddr, offset = unpack_addr(self.addrtype, buf, 4)
self.dstport = struct.unpack('!H', buf[offset:(offset+2)])[0]
self.data = buf[(offset+2):]
```
#### File: DEPRECATED_PYTHON_SRC/gsocks/relay.py
```python
import logging
import time
from gevent import socket
from gevent import select
from utils import request_fail, basic_handshake_server, read_request, \
sock_addr_info, request_success, pipe_tcp, bind_local_udp, addr_info, \
bind_local_sock_by_addr, pipe_udp
from msg import CMD_NOT_SUPPORTED, CONNECT, BIND, UDP_ASSOCIATE, \
GENERAL_SOCKS_SERVER_FAILURE, UDPRequest
log = logging.getLogger(__name__)
class RelaySessionError(Exception): pass
class RelayFactory(object):
def create_relay_session(self, socksconn, clientaddr):
raise NotImplementedError
class RelaySession(object):
def __init__(self, socksconn):
self.socksconn = socksconn
self.timeout = self.socksconn.gettimeout()
self.allsocks = [self.socksconn]
def track_sock(self, sock):
# track all sockets so we know what to clean
self.allsocks.append(sock)
def cmd_bind(self, req):
request_fail(self.socksconn, req, CMD_NOT_SUPPORTED)
def proc_tcp_request(self, req):
raise NotImplementedError
def relay_tcp(self):
raise NotImplementedError
def cmd_connect(self, req):
# a TCP CONNECT is handled in two steps: process the request, then relay the data.
self.proc_tcp_request(req)
self.relay_tcp()
def cmd_udp_associate(self, req):
# UDP association is more involved and is left to subclasses.
raise NotImplementedError
def process(self):
try:
if not basic_handshake_server(self.socksconn):
self.clean()
return
req = read_request(self.socksconn)
{
CONNECT: self.cmd_connect,
BIND: self.cmd_bind,
UDP_ASSOCIATE : self.cmd_udp_associate
}[req.cmd](req)
self.clean()
except Exception, e:
log.error("[Exception][RelaySession]: %s" % str(e))
self.clean()
def clean(self):
for sock in self.allsocks:
if sock:
sock.close()
class SocksSession(RelaySession):
def __init__(self, socksconn):
super(SocksSession, self).__init__(socksconn)
self.remoteconn = None
self.client_associate = None
self.last_clientaddr = None
self.client2local_udpsock = None
self.local2remote_udpsock = None
def proc_tcp_request(self, req):
dst = (req.dstaddr, req.dstport)
log.info("TCP request address: (%s:%d)" % dst)
self.remoteconn = socket.create_connection(dst, self.timeout)
self.track_sock(self.remoteconn)
addrtype, bndaddr, bndport = sock_addr_info(self.remoteconn)
request_success(self.socksconn, addrtype, bndaddr, bndport)
def relay_tcp(self):
pipe_tcp(self.socksconn, self.remoteconn, self.timeout, self.timeout)
def proc_udp_request(self, req):
self.client_associate = (req.dstaddr, req.dstport)
log.info("UDP client adress: (%s:%d)" % self.client_associate)
self.last_clientaddr = self.client_associate
self.client2local_udpsock = bind_local_udp(self.socksconn)
if not self.client2local_udpsock:
request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
return False
self.track_sock(self.client2local_udpsock)
bndtype, bndaddr, bndport = sock_addr_info(self.client2local_udpsock)
log.info("UDP ACCOSIATE: (%s:%d)" % (bndaddr, bndport))
request_success(self.socksconn, bndtype, bndaddr, bndport)
return True
def wait_for_first_udp(self):
# wait until the first valid packet arrives.
start = time.time()
timeout = self.timeout
while True:
readable, _, _ = select.select([self.socksconn, self.client2local_udpsock], [], [], timeout)
if not readable:
raise socket.timeout("timeout") # @UndefinedVariable
if self.socksconn in readable:
raise RelaySessionError("unexcepted read-event from tcp socket in UDP session")
timeout -= (time.time() - start)
if timeout <= 0:
raise socket.timeout("timeout") # @UndefinedVariable
data, addr = self.client2local_udpsock.recvfrom(65536)
try:
udpreq = UDPRequest(data)
if udpreq.frag == '\x00':
return data, addr
except:
pass
def relay_udp(self, firstdata, firstaddr):
def addrchecker():
def _(ip, port):
if self.client_associate[0] == "0.0.0.0" or \
self.client_associate[0] == "::":
return True
if self.client_associate == (ip, port):
return True
log.info("UDP packet dropped for invalid address.")
return False
return _
def c2r():
def _(data, addr):
self.last_clientaddr = addr
try:
udpreq = UDPRequest(data)
if udpreq.frag != '\x00':
return None, None
return udpreq.data, (udpreq.dstaddr, udpreq.dstport)
except Exception, e:
log.error("[relay_udp][c2r] Exception: %s", str(e))
return None, None
return _
def r2c():
def _(data, addr):
addrtype, dstaddr, dstport = addr_info(addr)
udpreq = UDPRequest(addrtype=addrtype, dstaddr=dstaddr, dstport=dstport, data=data)
return udpreq.pack(), self.last_clientaddr
return _
data, dst = c2r()(firstdata, firstaddr)
self.local2remote_udpsock = bind_local_sock_by_addr(dst)
self.track_sock(self.local2remote_udpsock)
self.local2remote_udpsock.send(data)
pipe_udp([self.socksconn],
self.client2local_udpsock, self.local2remote_udpsock,
self.timeout, self.timeout,
addrchecker(), c2r(), r2c())
def cmd_udp_associate(self, req):
if self.proc_udp_request(req):
firstdata, firstaddr = self.wait_for_first_udp()
self.relay_udp(firstdata, firstaddr)
class SocksRelayFactory(RelayFactory):
def create_relay_session(self, socksconn, clientaddr):
log.info("New socks connection from %s" % str(clientaddr))
return SocksSession(socksconn)
```
#### File: DEPRECATED_PYTHON_SRC/gsocks/server.py
```python
import logging
from gevent import socket
from gevent.server import StreamServer
from gevent.pool import Pool
log = logging.getLogger(__name__)
class SocksServer(object):
def __init__(self, ip, port, relayfactory, timeout=30, maxclient=200):
self.ip = ip
self.port = port
self.timeout = timeout
self.relayfactory = relayfactory
self.pool = Pool(maxclient)
addrinfo = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM, socket.SOL_TCP) # @UndefinedVariable
_, _, _, _, localaddr = addrinfo[0]
self.server = StreamServer(localaddr, self._handle, spawn=self.pool)
def _handle(self, sock, addr):
try:
sock.settimeout(self.timeout)
session = self.relayfactory.create_relay_session(sock, addr)
session.process()
except Exception, e:
log.error("[Exception][SocksServer]: %s" % str(e))
def stop(self):
return self.server.stop()
@property
def closed(self):
return self.server.closed
def start(self):
self.server.start()
def run(self):
self.server.serve_forever()
```
#### File: firefly-proxy/DEPRECATED_PYTHON_SRC/main.py
```python
import os
import sys
import json
import codecs
import shutil
import time
from datetime import datetime, date
import multiprocessing
import threading
if getattr(sys, 'frozen', False):
if sys.platform == "darwin":
rootdir = os.getcwd()
else:
rootdir = os.path.dirname(sys.executable)
else:
rootdir = os.path.dirname(os.path.realpath(__file__))
# make all filenames based on rootdir being unicode
rootdir = rootdir.decode(sys.getfilesystemencoding())
sys.path.append(rootdir)
from lib.utils import init_logging, local_update_datafile, set_ca_certs_env, singleton_check, singleton_clean,\
open_url
from lib.ipc import ActorObject
from component.ui import UI
from component.admin import Admin
from component.circumvention import CircumventionChannel, remote_update_meek_relays
from component.local import HTTPProxy, SocksProxy
from component.brz import Browser, able_to_setproxy
from component.matcher import create_matcher, blacklist_info, remote_update_blacklist
from component.hosts import hosts_info, remote_update_hosts
class Coordinator(ActorObject):
def __init__(self, rootdir, conf_file):
super(Coordinator, self).__init__()
self.rootdir = rootdir
self.conf_file = conf_file
self.confdata = None
self.admin = None
self.ui = None
self.cc_channel = None
self.matcher = None
self.http_proxy = None
self.socks_proxy = None
self.browser = None
def loadconf(self):
f = codecs.open(os.path.join(self.rootdir, self.conf_file), "r", "utf-8")
self.confdata = json.loads(f.read())
f.close()
def backup_conf(self):
conf = os.path.join(self.rootdir, self.conf_file)
shutil.copy(conf, conf + ".last")
default = conf + ".default"
if not os.path.isfile(default):
shutil.copy(conf, default)
def recover_conf(self):
try:
conf = os.path.join(self.rootdir, self.conf_file)
shutil.copy(conf + ".last", conf)
return True
except:
return False
def initialize(self):
self.singleton = singleton_check(self.rootdir)
if not self.singleton:
sys.exit(-1)
self.loadconf()
self.ref().share('rootdir', self.rootdir)
self.ref().share('confdata', self.confdata)
self.start_actor()
def start_admin(self):
self.admin = Admin(self.ref())
self.admin.start()
def start_cc_channel(self):
try:
self.cc_channel = CircumventionChannel(self.ref())
self.cc_channel.start()
except Exception, e:
print "failed to start circumvention channel: %s" % str(e)
def start_local_proxy(self):
global rootdir
circumvention_url = self.IPC_circumvention_url()
self.matcher = create_matcher(rootdir, self.confdata, circumvention_url)
if self.confdata['enable_http_proxy']:
try:
self.http_proxy = HTTPProxy(self.ref(), self.matcher)
self.http_proxy.start()
except Exception, e:
print "failed to start http proxy: %s" % str(e)
if self.confdata['enable_socks_proxy']:
try:
self.socks_proxy = SocksProxy(self.ref(), self.matcher)
self.socks_proxy.start()
except Exception, e:
print "failed to start socks proxy: %s" % str(e)
def proxy_info(self):
if self.socks_proxy:
#ip, port = self.socks_proxy.ref().IPC_addr()
#return ProxyInfo(socks.PROXY_TYPE_SOCKS5, ip, port, True, None, None)
url = self.socks_proxy.ref().IPC_url()
return {'http': url, 'https': url}
elif self.http_proxy:
#ip, port = self.http_proxy.ref().IPC_addr()
#return ProxyInfo(socks.PROXY_TYPE_HTTP, ip, port, True, None, None)
url = self.http_proxy.ref().IPC_url()
return {'http': url, 'https': url}
else:
#return None
return {}
def update_matcher(self):
circumvention_url = self.IPC_circumvention_url()
self.matcher = create_matcher(rootdir, self.confdata, circumvention_url)
if self.http_proxy:
self.http_proxy.ref().IPC_update_matcher(self.matcher)
if self.socks_proxy:
self.socks_proxy.ref().IPC_update_matcher(self.matcher)
def start_browser(self, url=None):
http_proxy_enabled = True if self.http_proxy else False
socks_proxy_enabled = True if self.socks_proxy else False
try:
if self.confdata['launch_browser']:
set_proxy = True
else:
set_proxy = False
# if we are required to set the browser proxy but cannot, open the config page,
# which shows a tip on configuring it manually.
if set_proxy and not able_to_setproxy() and not url:
url = self.admin.ref().IPC_url()
self.browser = Browser(self.ref(), http_proxy_enabled, socks_proxy_enabled, initial_url=url, set_proxy=set_proxy)
self.browser.start()
except Exception, e:
print "failed to launch browser failed: %s" % str(e)
def check_and_update_blacklist(self):
try:
blacklist_date = datetime.strptime(self.matcher.blacklist_matcher.meta['date'], '%Y-%m-%d').date()
if date.today() > blacklist_date:
updated = remote_update_blacklist(self.proxy_info(), self.rootdir, self.confdata)
if updated:
self.update_matcher()
except Exception, e:
print "failed to update blacklist: %s" % str(e)
def check_and_update_hosts(self):
try:
hosts_date = datetime.strptime(self.matcher.hosts.meta['date'], '%Y-%m-%d').date()
if date.today() > hosts_date:
updated = remote_update_hosts(self.proxy_info(), self.rootdir, self.confdata)
if updated:
self.update_matcher()
except Exception, e:
print "failed to update hosts: %s" % str(e)
def update_meek_relays(self):
try:
updated = remote_update_meek_relays(self.proxy_info(), self.rootdir, self.confdata)
if updated:
self.cc_channel.ref().IPC_update_meek_relays()
except Exception, e:
print "failed to update meek relays: %s" % str(e)
def check_for_update(self):
time.sleep(20)
if self.cc_channel.type == "meek":
self.update_meek_relays()
self.check_and_update_blacklist()
self.check_and_update_hosts()
def run(self):
try:
self.initialize()
self.start_cc_channel()
self.start_admin()
self.start_local_proxy()
except Exception, e:
print "failed to start basic steps/processes: %s, try to recover ..." % str(e)
if not self.recover_conf():
raise e
self.end()
self.initialize()
self.start_cc_channel()
self.start_admin()
self.start_local_proxy()
self.backup_conf()
if self.confdata['launch_browser']:
self.start_browser()
t = threading.Thread(target=self.check_for_update)
t.daemon = True
t.start()
self.ui = UI(self.ref())
self.ui.run()
self.end()
def end(self):
if self.admin:
self.admin.terminate()
self.admin.join()
if self.cc_channel:
self.cc_channel.terminate()
self.cc_channel.join()
if self.http_proxy:
self.http_proxy.terminate()
self.http_proxy.join()
if self.socks_proxy:
self.socks_proxy.terminate()
self.socks_proxy.join()
if self.browser:
self.browser.terminate()
self.browser.join()
singleton_clean(self.rootdir, self.singleton)
# IPC interfaces
def IPC_quit(self):
self.end()
return True
def IPC_circumvention_url(self):
"""ask circumvention channel for forwarding url"""
return self.cc_channel.ref().IPC_url()
def IPC_socks_proxy_addr(self):
return self.socks_proxy.ref().IPC_addr()
def IPC_http_proxy_addr(self):
return self.http_proxy.ref().IPC_addr()
def IPC_launch_browser(self):
if self.browser and self.browser.is_alive():
return self.browser.ref().IPC_open_default_page()
else:
self.start_browser()
def IPC_open_admin_url(self):
url = self.admin.ref().IPC_url()
if sys.platform == "darwin":
# the browser-open routines do not work in some cases of OS X
# use built-in interface to ensure admin url opens.
open_url(url)
return
if self.browser and self.browser.is_alive():
return self.browser.ref().IPC_open_url(url)
else:
self.start_browser(url)
def IPC_shadowsocks_methods(self):
return self.cc_channel.ref().IPC_shadowsocks_methods()
def IPC_blacklist_info(self):
return blacklist_info(self.rootdir, self.confdata, self.matcher.blacklist_matcher)
def IPC_hosts_info(self):
return hosts_info(self.rootdir, self.confdata, self.matcher.hosts)
def IPC_get_custom_blacklist(self):
return self.matcher.blacklist_matcher.get_custom_blacklist()
def IPC_get_custom_whitelist(self):
return self.matcher.blacklist_matcher.get_custom_whitelist()
def IPC_update_config(self, data):
try:
self.confdata.update(data)
f = codecs.open(os.path.join(self.rootdir, self.conf_file), "w", "utf-8")
f.write(json.dumps(self.confdata,
sort_keys=True,
indent=4,
separators=(',', ': '),
ensure_ascii=False))
f.close()
return data
except Exception, e:
print "failed to update config: %s" % str(e)
return None
def IPC_resume_default_config(self):
conf = os.path.join(self.rootdir, self.conf_file)
shutil.copy(conf + ".default", conf)
self.loadconf()
return self.confdata
def IPC_update_blacklist(self):
try:
updated = remote_update_blacklist(self.proxy_info(), self.rootdir, self.confdata)
if updated:
self.update_matcher()
return True
except Exception, e:
print "failed to update blacklist: %s" % str(e)
return False
def IPC_update_custom_list(self, custom_bl=None, custom_wl=None):
if custom_bl:
local_update_datafile(u"\n".join(custom_bl),
os.path.join(self.rootdir, self.confdata['custom_blacklist']))
if custom_wl:
local_update_datafile(u"\n".join(custom_wl),
os.path.join(self.rootdir, self.confdata['custom_whitelist']))
self.update_matcher()
def IPC_update_hosts(self, remote=True):
try:
if remote:
remote_update_hosts(self.proxy_info(), self.rootdir, self.confdata)
self.update_matcher()
return True
except Exception, e:
print "failed to update hosts: %s" % str(e)
return False
def IPC_update_hosts_disabled(self, disabled):
local_update_datafile(u"\n".join(disabled), os.path.join(self.rootdir, self.confdata['hosts']['disabled']))
self.update_matcher()
def IPC_support_ssh(self):
return self.cc_channel.ref().IPC_support_ssh()
def IPC_setproxy_tip(self):
if not self.confdata['launch_browser']:
return False
return not able_to_setproxy()
def close_std():
sys.stdin.close()
sys.stdin = open(os.devnull)
sys.stderr.close()
sys.stderr = open(os.devnull)
def main():
close_std()
multiprocessing.freeze_support()
init_logging()
global rootdir
conf_file = "config.json"
set_ca_certs_env(os.path.join(rootdir, "cacert.pem").encode(sys.getfilesystemencoding()))
coordinator = Coordinator(rootdir, conf_file)
coordinator.run()
if __name__ == '__main__':
main()
```
#### File: DEPRECATED_PYTHON_SRC/meeksocks/relay.py
```python
import logging
import uuid
import random
import ssl
from collections import defaultdict
import gevent
from gevent import select
from gevent import socket
from gevent.queue import Queue, LifoQueue
from gevent.event import Event
from geventhttpclient import HTTPClient, URL
from gsocks.relay import RelayFactory, RelaySession, RelaySessionError
from gsocks.msg import Reply, GENERAL_SOCKS_SERVER_FAILURE
from gsocks.utils import SharedTimer, bind_local_udp, request_fail, request_success, \
sock_addr_info
from constants import SESSION_ID_LENGTH, MAX_PAYLOAD_LENGTH, HEADER_SESSION_ID, \
HEADER_UDP_PKTS, HEADER_MODE, HEADER_MSGTYPE, MSGTYPE_DATA, MODE_STREAM, \
HEADER_ERROR, CLIENT_MAX_TRIES, CLIENT_RETRY_DELAY, CLIENT_INITIAL_POLL_INTERVAL, \
CLIENT_POLL_INTERVAL_MULTIPLIER, CLIENT_MAX_POLL_INTERVAL, MSGTYPE_TERMINATE, \
CLIENT_MAX_FAILURE
log = logging.getLogger(__name__)
def session_id():
return str(uuid.uuid4())[:SESSION_ID_LENGTH]
def get_meek_meta(headers, key, default=""):
# requests lib gives lower-string headers
return dict(headers).get(key.lower(), default)
class Relay:
def __init__(self, fronturl="", hostname="", properties="", failure=0):
self.fronturl = fronturl
self.hostname = hostname
self.properties = properties
self.failure = failure
class HTTPClientPool:
def __init__(self):
self.pool = defaultdict(LifoQueue)
def get(self, relay, ca_certs, timeout):
try:
return self.pool[relay.fronturl].get(block=False)
except gevent.queue.Empty:
insecure = "verify" not in relay.properties
if ca_certs:
ssl_options = {'ca_certs': ca_certs, 'ssl_version': ssl.PROTOCOL_TLSv1}
else:
ssl_options = {'ssl_version': ssl.PROTOCOL_TLSv1}
conn = HTTPClient.from_url(
URL(relay.fronturl),
insecure=insecure,
block_size=MAX_PAYLOAD_LENGTH,
connection_timeout=timeout,
network_timeout=timeout,
concurrency=1,
ssl_options=ssl_options
)
return conn
def release(self, relay, conn):
self.pool[relay.fronturl].put(conn)
class MeekSession(RelaySession):
conn_pool = HTTPClientPool()
def __init__(self, socksconn, meek, timeout):
super(MeekSession, self).__init__(socksconn)
self.sessionid = session_id()
self.meek = meek
self.meektimeout = timeout
self.relay = self.meek.select_relay()
self.ca_certs = self.meek.ca_certs
self.httpclient = self.conn_pool.get(self.relay, self.ca_certs, self.meektimeout)
self.udpsock = None
self.allsocks = [self.socksconn]
self.l2m_queue = Queue()
self.m2l_queue = Queue()
self.m_notifier = Event()
self.l_notifier = Event()
self.finish = Event()
self.m_notifier.clear()
self.l_notifier.clear()
self.finish.clear()
self.timer = SharedTimer(self.meektimeout)
def _stream_response(self, response):
try:
chunk = response.read(MAX_PAYLOAD_LENGTH)
while chunk:
log.debug("%s streaming DOWN %d bytes" % (self.sessionid, len(chunk)))
yield chunk, ""
chunk = response.read(MAX_PAYLOAD_LENGTH)
except GeneratorExit:
response.release()
raise StopIteration
def meek_response(self, response, stream):
if stream:
return self._stream_response(response)
data = response.read()
response.release()
if not data:
return [("", "")]
if not self.udpsock:
return [(data, "")]
# parse UDP packets
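# several UDP datagrams are packed back-to-back into one HTTP body and their sizes are
# reported as a comma-separated list in the HEADER_UDP_PKTS header.
# worked example: HEADER_UDP_PKTS "10,20" with a 30-byte body splits into data[0:10] and data[10:30].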
log.debug("%s DOWN %d bytes" % (self.sessionid, len(data)))
lengths = get_meek_meta(response.headers, HEADER_UDP_PKTS).split(",")
pos = 0
pkts = []
for length in lengths:
nxt = pos + int(length)
pkts.append((data[pos:nxt], ""))
pos = nxt
return pkts
def meek_roundtrip(self, pkts):
headers = {
HEADER_SESSION_ID: self.sessionid,
HEADER_MSGTYPE: MSGTYPE_DATA,
'Host': self.relay.hostname,
'Content-Type': "application/octet-stream",
'Connection': "Keep-Alive",
}
stream = False
if not self.udpsock and "stream" in self.relay.properties:
stream = True
headers[HEADER_MODE] = MODE_STREAM
if pkts and self.udpsock:
lengths = str(",".join([str(len(p)) for p in pkts]))
headers[HEADER_UDP_PKTS] = lengths
data = "".join(pkts)
headers['Content-Length'] = str(len(data))
for _ in range(CLIENT_MAX_TRIES):
try:
log.debug("%s UP %d bytes" % (self.sessionid, len(data)))
resp = self.httpclient.post("/", body=data, headers=headers)
if resp.status_code != 200:
# the meek server always gives 200, so any non-200 status means an external issue.
continue
err = get_meek_meta(resp.headers, HEADER_ERROR)
if err:
return [("", err)]
else:
try:
return self.meek_response(resp, stream)
except Exception as ex:
log.error("[Exception][meek_roundtrip - meek_response]: %s" % str(ex))
resp.release()
return [("", "Data Format Error")]
except socket.timeout: # @UndefinedVariable
return [("", "timeout")]
except Exception as ex:
log.error("[Exception][meek_roundtrip]: %s" % str(ex))
gevent.sleep(CLIENT_RETRY_DELAY)
self.relay.failure += 1
return [("", "Max Retry (%d) Exceeded" % CLIENT_MAX_TRIES)]
def meek_sendrecv(self):
pkts = []
datalen = 0
while not self.l2m_queue.empty():
pkt = self.l2m_queue.get()
pkts.append(pkt)
datalen += len(pkt)
if datalen >= MAX_PAYLOAD_LENGTH:
for (resp, err) in self.meek_roundtrip(pkts):
yield (resp, err)
if err or not resp:
return
pkts = []
datalen = 0
for (resp, err) in self.meek_roundtrip(pkts):
yield (resp, err)
if err or not resp:
return
def meek_relay(self):
for (resp, err) in self.meek_sendrecv():
if err:
return err
if resp:
self.m2l_queue.put(resp)
self.l_notifier.set()
return ""
def meek_relay_thread(self):
interval = CLIENT_INITIAL_POLL_INTERVAL
while not self.finish.is_set():
try:
hasdata = self.m_notifier.wait(timeout=interval)
self.m_notifier.clear()
err = self.meek_relay()
if err:
break
if not hasdata:
interval *= CLIENT_POLL_INTERVAL_MULTIPLIER
if interval > CLIENT_MAX_POLL_INTERVAL:
interval = CLIENT_MAX_POLL_INTERVAL
except Exception as ex:
log.error("[Exception][meek_relay_thread]: %s" % str(ex))
break
self.finish.set()
def write_to_client(self, data):
if self.udpsock:
self.udpsock.sendto(data, self.last_clientaddr)
else:
self.socksconn.sendall(data)
def meek_write_to_client_thread(self):
while not self.finish.is_set():
try:
hasdata = self.l_notifier.wait(timeout=CLIENT_MAX_POLL_INTERVAL)
self.l_notifier.clear()
if not hasdata:
self.timer.count(CLIENT_MAX_POLL_INTERVAL)
if self.timer.timeout():
break
else:
self.timer.reset()
while not self.m2l_queue.empty():
data = self.m2l_queue.get()
if data:
self.write_to_client(data)
except Exception as ex:
log.error("[Exception][meek_write_to_client_thread]: %s" % str(ex))
break
self.finish.set()
def read_from_client(self, timeout):
readable, _, _ = select.select(self.allsocks, [], [], CLIENT_MAX_POLL_INTERVAL)
if not readable:
return None
if self.socksconn in readable:
if self.udpsock:
raise RelaySessionError("unexcepted read-event from tcp socket in UDP session")
data = self.socksconn.recv(MAX_PAYLOAD_LENGTH)
if not data:
raise RelaySessionError("peer closed")
return data
if self.udpsock and self.udpsock in readable:
data, addr = self.udpsock.recvfrom(MAX_PAYLOAD_LENGTH)
if not self.valid_udp_client(addr):
return None
else:
self.last_clientaddr = addr
return data
def meek_read_from_client_thread(self):
while not self.finish.is_set():
try:
data = self.read_from_client(CLIENT_MAX_POLL_INTERVAL)
if not data:
self.timer.count(CLIENT_MAX_POLL_INTERVAL)
if self.timer.timeout():
break
else:
self.timer.reset()
self.l2m_queue.put(data)
self.m_notifier.set()
except Exception as ex:
log.error("[Exception][meek_read_from_client_thread]: %s" % str(ex))
break
self.finish.set()
def proc_tcp_request(self, req):
self.l2m_queue.put(req.pack())
def relay_tcp(self):
read_thread = gevent.spawn(self.meek_read_from_client_thread)
write_thread = gevent.spawn(self.meek_write_to_client_thread)
relay_thread = gevent.spawn(self.meek_relay_thread)
# notify relay to send request
self.m_notifier.set()
[t.join() for t in (read_thread, write_thread, relay_thread)]
log.info("Session %s Ended" % self.sessionid)
def valid_udp_client(self, addr):
if self.client_associate[0] == "0.0.0.0" or \
self.client_associate[0] == "::":
return True
if self.client_associate == addr:
return True
return False
def cmd_udp_associate(self, req):
self.client_associate = (req.dstaddr, req.dstport)
self.last_clientaddr = self.client_associate
for (resp, err) in self.meek_roundtrip([req.pack()]):
if err:
return
if resp:
Reply(resp)
self.udpsock = bind_local_udp(self.socksconn)
if not self.udpsock:
request_fail(self.socksconn, req, GENERAL_SOCKS_SERVER_FAILURE)
return
self.track_sock(self.udpsock)
read_thread = gevent.spawn(self.meek_read_from_client_thread)
write_thread = gevent.spawn(self.meek_write_to_client_thread)
relay_thread = gevent.spawn(self.meek_relay_thread)
request_success(self.socksconn, *sock_addr_info(self.udpsock))
[t.join() for t in (read_thread, write_thread, relay_thread)]
log.info("Session %s Ended" % self.sessionid)
def meek_terminate(self):
headers = {
HEADER_SESSION_ID: self.sessionid,
HEADER_MSGTYPE: MSGTYPE_TERMINATE,
#'Content-Type': "application/octet-stream",
'Content-Length': "0",
'Connection': "Keep-Alive",
'Host': self.relay.hostname,
}
try:
self.httpclient.post("/", data="", headers=headers)
except:
pass
def clean(self):
self.meek_terminate()
for sock in self.allsocks:
sock.close()
#self.httpclient.close()
self.conn_pool.release(self.relay, self.httpclient)
class MeekRelayFactory(RelayFactory):
def __init__(self, relays, ca_certs="", timeout=60):
self.relays = relays
self.timeout = timeout
self.ca_certs = ca_certs
def set_relays(self, relays):
self.relays = relays
def select_relay(self):
self.relays = [r for r in self.relays if r.failure < CLIENT_MAX_FAILURE]
return random.choice(self.relays)
def create_relay_session(self, socksconn, clientaddr):
session = MeekSession(socksconn, self, self.timeout)
log.info("Session %s created for connection from %s" % (session.sessionid, str(clientaddr)))
return session
``` |
{
"source": "1776894091/wechat-zfb_step",
"score": 2
} |
#### File: 1776894091/wechat-zfb_step/lx_step.py
```python
import requests
import hashlib
import json
import time
import random
def md5(code):
res=hashlib.md5()
res.update(code.encode("utf8"))
return res.hexdigest()
def get_information(mobile,password):
header = {
'Content-Type': 'application/json; charset=utf-8',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"
}
url="http://sports.lifesense.com/sessions_service/login?systemType=2&version=4.6.7"
datas = {
"appType":6,
"clientId":md5("5454"),
"loginName":str(mobile),
"password":<PASSWORD>(<PASSWORD>)),
"roleType":0
}
response =requests.post(url,headers=header,data=json.dumps(datas))
return response.text
def update_step(step,information):
step =int(step)
url="http://sports.lifesense.com/sport_service/sport/sport/uploadMobileStepV2?version=4.5&systemType=2"
accessToken=json.loads(information)["data"]["accessToken"]
userId=json.loads(information)["data"]["userId"]
#print(accessToken)
#print(userId)
# get the current date and time
timeStamp=time.time()
localTime = time.localtime(timeStamp)
strTime = time.strftime("%Y-%m-%d %H:%M:%S", localTime)
print(strTime)
measureTime=strTime+","+str(int(timeStamp))
header = {
'Cookie': 'accessToken='+accessToken,
'Content-Type': 'application/json; charset=utf-8',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"
}
sport_datas = {
"list": [
{
"DataSource":2,
"active":1,
"calories":str(int(step/4)),
"dataSource":2,
"deviceId":"M_NULL",
"distance":str(int(step/3)),
"exerciseTime":0,
"isUpload":0,
"measurementTime":measureTime,
"priority":0,
"step": str(step),
"type":2,
"updated":str(int(time.time()*1000)),
"userId":str(userId)
}]
}
result=requests.post(url,headers=header,data=json.dumps(sport_datas))
return result.text
def server_send(msg):
if sckey == '':
return
server_url = "https://sc.ftqq.com/" + str(sckey) + ".send"
data = {
'text': msg,
'desp': msg
}
requests.post(server_url, data=data)
def kt_send(msg):
if ktkey == '':
return
kt_url = 'https://push.xuthus.cc/send/'+str(ktkey)
data = ('步数刷取完成,请查看详细信息~\n'+str(msg)).encode("utf-8")
requests.post(kt_url, data=data)
def execute_walk(phone,password,step):
information=get_information(phone,password)
update_result=update_step(step,information)
result=json.loads(update_result)["msg"]
if result == '成功':
msg = "刷新步数成功!此次刷取" + str(step) + "步。"
print(msg)
server_send(msg)
kt_send(msg)
else:
msg = "刷新步数失败!请查看云函数日志。"
print(msg)
server_send(msg)
kt_send(msg)
def main():
if phone and password and step != '':
execute_walk(phone, password, step)
else:
print("参数不全,请指定参数。或者在调用中直接指定参数")
# -- configuration --
# ------------------------------
phone = '' # login account (phone number)
password = '' # password
step = random.randint(40000,60000) # random step count between 40000 and 60000
sckey = '' # ServerChan key (optional)
ktkey = '' # CoolPush key (optional)
# ------------------------------
def main_handler(event, context):
return main()
if __name__ == '__main__':
main()
``` |
{
"source": "1777TheVikings/FRC1777-Vision",
"score": 3
} |
#### File: FRC1777-Vision/postprocessors/display.py
```python
from typing import Any, NoReturn, List
from numpy import ndarray
import xml.etree.ElementTree as ElementTree
import cv2
from base_classes import PostProcessorBase
class DisplayPostProcessor(PostProcessorBase):
"""
Outputs frames to the screen, optionally drawing the detected objects on the screen.
Configuration info:
- `Annotate`: If set to true, the location of detected objects will be drawn onto each frame.
"""
annotate = bool()
async def setup(self, component_config_root: ElementTree.Element):
self.annotate = component_config_root.find("Annotate").text in \
['true', '1', 't', 'y', 'yes']
async def cleanup(self):
pass
async def postprocess(self, data: List[Any], frame: ndarray) -> NoReturn:
output_frame = frame
if self.annotate:
for i in data:
cv2.rectangle(frame, (i.rect[0], i.rect[1]), (i.rect[2], i.rect[3]), (0, 255, 0), 2)
cv2.putText(frame, str(i.angle), (int(i.x), int(i.y)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 2, cv2.LINE_AA)
cv2.imshow("display", output_frame)
cv2.waitKey(1)
``` |
{
"source": "17790793549/tongjier",
"score": 3
} |
#### File: 17790793549/tongjier/python_xpath_baidu_renming.py
```python
from multiprocessing import Process
from random import randint
from time import time, sleep
import os
import requests
from lxml import etree
def baocun(url):# save the image at the given url to the local disk; only the image url is needed
try:
root = "D://PYTHON//"# root folder for saved files
path=root+url.split('/')[-1]# split the image url on '/' and keep the last segment (the .jpg filename) as the file name
if not os.path.exists(root):
os.mkdir(root)
if not os.path.exists(path):
r = requests.get(url)
r.raise_for_status()
with open(path,'wb') as f:# 'wb' opens the file for binary writing: it is created if missing and overwritten if present, suitable for non-text files such as images
f.write(r.content)# r.content is the raw bytes of the response (the image data)
print('爬取成功')
except Exception as e:
print(e)
def download_task(*args):
for i in args:
url = 'http://www.baidu.com/s?wd='+i
response = requests.get(url)
response.encoding = 'gbk'
wb_data = response.text
html = etree.HTML(wb_data)
print(wb_data)
def main():
start = time()
# start two processes; target is the function to run and args its arguments - note that args must be a tuple
p1 = Process(target=download_task, args=('韩红','胡歌','潘恩义','陈国柏','周卓浩','唐碧邦'))
p2 = Process(target=download_task, args=('郑凯','艾伦','张怀顺','何毅','邱存新','王成文'))
# get the process ids
# start the processes
p1.start()
p2.start()
##############
# block until both processes finish.
p1.join()
p2.join()
##############
end = time()
print(end - start)
if __name__ == '__main__':
main()
``` |
{
"source": "177arc/fpl-data",
"score": 2
} |
#### File: fpl-data/fpldata/manager.py
```python
from typing import Tuple, NoReturn, Dict
from shutil import copyfile, rmtree
from fplpandas import FPLPandas
import logging
import tempfile
import pandas as pd
import datetime as dt
from datadict import DataDict
import numpy as np
from .s3store import S3Store
from .export import export_dfs, add_data_sets_stats, export_data_sets, VERSION
from .common import Context
# Define type aliases
DF = pd.DataFrame
S = pd.Series
class FPLManagerBase:
mode: str
def create_context(self) -> Context:
raise NotImplementedError
def get_game_weeks(self) -> DF:
raise NotImplementedError
def get_teams(self) -> DF:
raise NotImplementedError
def get_teams_last_season(self) -> DF:
raise NotImplementedError
def get_fixtures(self) -> DF:
raise NotImplementedError
def get_fixtures_last_season(self) -> DF:
raise NotImplementedError
def get_players(self) -> Tuple[DF, DF, DF]:
raise NotImplementedError
def get_players_last_season(self) -> Tuple[DF, DF, DF]:
raise NotImplementedError
def get_last_season_stats_est(self) -> DF:
raise NotImplementedError
def publish_data_sets(self, variables: Dict) -> DF:
raise NotImplementedError
def assert_context(self, ctx: Context) -> NoReturn:
raise NotImplementedError
def assert_team_goal_stats_ext(self, team_goal_stats_ext: DF) -> NoReturn:
raise NotImplementedError
def assert_player_gw_next_eps_ext(self, player_gw_next_eps_ext: DF) -> NoReturn:
raise NotImplementedError
def assert_players_gw_team_eps_ext(self, players_gw_team_eps_ext: DF) -> NoReturn:
raise NotImplementedError
class FPLManager(FPLPandas, FPLManagerBase):
TEAMS_FILE = 'teams.csv' # File with team data for last season
PLAYERS_FILE = f'players.csv' # File with player data for last season
PLAYERS_HISTORY_FILE = f'players_history.csv' # File with player fixture data for last season
FIXTURES_FILE = f'fixtures.csv' # File with fixture data for last season
TEAM_STATS_EST_FILE = 'data/team_goals_stats_estimates.csv' # Path to file containing goal estimates for team that just joined the league
DATA_TEMP_DIR = f'{tempfile.gettempdir()}/fpl_data'
DATA_DIR = f'data'
DATA_SETS_FILE = f'data/data_sets.csv'
DATA_DICT_FILE = f'data/data_dictionary.csv'
DEF_FIXTURE_LOOK_BACK = 20 # Limit of how many fixtures to look back for calculating rolling team stats
DEF_PLAYER_FIXTURE_LOOK_BACK = 12 # Limit of how many fixture to look back for calculating rolling player stats
last_season: str
current_season: str
last_season_path: str
fixtures_look_back: int
player_fixtures_look_back: int
publish_s3_bucket: str
def __init__(self, last_season: str, current_season: str, publish_s3_bucket: str,
fixtures_look_back: int = DEF_FIXTURE_LOOK_BACK, player_fixtures_look_back: int = DEF_PLAYER_FIXTURE_LOOK_BACK):
self.last_season = last_season
self.current_season = current_season
self.last_season_path = f'{self.DATA_DIR}/{last_season}'
self.fixtures_look_back = fixtures_look_back
self.player_fixtures_look_back = player_fixtures_look_back
self.publish_s3_bucket = publish_s3_bucket
self.mode = 'Live'
super().__init__()
def create_context(self) -> Context:
ctx = Context()
ctx.fixtures_look_back = self.fixtures_look_back
ctx.player_fixtures_look_back = self.player_fixtures_look_back
ctx.last_season = self.last_season
ctx.current_season = self.current_season
ctx.now = dt.datetime.now()
return ctx
def get_teams_last_season(self) -> DF:
return pd.read_csv(f'{self.last_season_path}/{self.TEAMS_FILE}', index_col=['id'], na_values='None')
def get_fixtures_last_season(self) -> DF:
return pd.read_csv(f'{self.last_season_path}/{self.FIXTURES_FILE}', index_col=['id'], na_values='None')
def get_players_last_season(self) -> Tuple[DF, DF, DF]:
return (pd.read_csv(f'{self.last_season_path}/{self.PLAYERS_FILE}', index_col=['id'], na_values='None'),
None,
pd.read_csv(f'{self.last_season_path}/{self.PLAYERS_HISTORY_FILE}', index_col=['player_id', 'fixture'], na_values='None'))
def get_last_season_stats_est(self) -> DF:
        # Loads estimates for teams for which no history is available, in particular those that have been promoted to the Premier League.
return pd.read_csv(self.TEAM_STATS_EST_FILE).set_index('Team Code')
def publish_data_sets(self, variables: Dict) -> DF:
logging.info(f'Publishing data sets to {self.publish_s3_bucket}/v{VERSION}/latest/ ...')
s3store = S3Store(self.publish_s3_bucket)
# Clear the data directory
rmtree(self.DATA_TEMP_DIR, ignore_errors=True)
data_sets = pd.read_csv(self.DATA_SETS_FILE).set_index('Name')
(data_sets
.pipe(add_data_sets_stats, variables)
.pipe(export_data_sets, f'{self.DATA_TEMP_DIR}/v{VERSION}', self.DATA_SETS_FILE.split("/")[-1]))
# Export data frames as CSV files.
export_dfs(variables, data_sets, f'{self.DATA_TEMP_DIR}/v{VERSION}', DataDict(data_dict_file=self.DATA_DICT_FILE))
# Copy the data dictionary and data sets file.
_ = copyfile(self.DATA_DICT_FILE, f'{self.DATA_TEMP_DIR}/v{VERSION}/{self.DATA_DICT_FILE.split("/")[-1]}')
# And off we go to S3.
s3store.save_dir(self.DATA_TEMP_DIR, f'v{VERSION}/latest/')
logging.info('Done!')
def assert_context(self, ctx: Context) -> NoReturn:
assert ctx.next_gw + len(ctx.next_gw_counts.keys()) - 1 == ctx.total_gws
def assert_team_goal_stats_ext(self, team_goal_stats_ext: DF) -> NoReturn:
# TODO: Implement some sense checks.
pass
def assert_player_gw_next_eps_ext(self, player_gw_next_eps_ext: DF) -> NoReturn:
        assert player_gw_next_eps_ext[lambda df: df.isin([np.inf, -np.inf]).any(axis=1)].shape[0] == 0, 'There are infinite values in player_gw_next_eps_ext. Run player_gw_next_eps_ext[lambda df: df.isin([np.inf, -np.inf]).any(axis=1)] to find the rows with infinite values.'
def assert_players_gw_team_eps_ext(self, players_gw_team_eps_ext: DF) -> NoReturn:
        assert players_gw_team_eps_ext[lambda df: df.isin([np.inf, -np.inf]).any(axis=1)].shape[0] == 0, 'There are infinite values in players_gw_team_eps_ext. Run players_gw_team_eps_ext[lambda df: df.isin([np.inf, -np.inf]).any(axis=1)] to find the rows with infinite values.'
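# A minimal usage sketch (illustrative only: the season labels and bucket name are placeholders,
# and the calls need network access, AWS credentials and the data/ files referenced above):
#
#   fpl = FPLManager(last_season='2019-20', current_season='2020-21', publish_s3_bucket='my-fpl-bucket')
#   ctx = fpl.create_context()
#   teams_last = fpl.get_teams_last_season()
#   # fpl.publish_data_sets(variables)  # exports the configured data sets and uploads them to S3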
``` |
{
"source": "177arc/pandas-datadict",
"score": 2
} |
#### File: 177arc/pandas-datadict/build_deploy.py
```python
import logging as log
import glob, os
from shell_utils import shell
log.basicConfig(level=log.INFO, format='%(message)s')
def __execute(command: str):
return print(shell(command, capture=True, silent=True).stdout)
def unitest():
log.info('Running unit tests ...')
__execute('python -m pytest tests/')
def install():
log.info('Installing package locally ...')
__execute('pip install .')
def build():
log.info('Building package ...')
list(map(lambda x: os.remove(x), glob.glob('dist/*')))
__execute('python setup.py sdist bdist_wheel')
def check():
log.info('Checking package ...')
__execute('twine check dist/*')
def doc():
log.info('Generating documentation ...')
__execute('pdoc --force --html --output-dir docs datadict datadict.jupyter')
def publish(repository='testpypi'):
log.info(f'Publishing package to {repository} ...')
__execute(f'twine upload --repository {repository} dist/*')
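# A minimal usage sketch (illustrative only; assumes pytest, twine, pdoc and shell_utils are installed):
#
#   python -c "import build_deploy as b; b.unitest(); b.build(); b.check()"
#   python -c "import build_deploy as b; b.publish('pypi')"  # upload to the real index instead of testpypi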
```
#### File: pandas-datadict/datadict/datadict.py
```python
import pandas as pd
import numpy as np
import warnings
import os
import functools
import pickle
from os import path
from pandas.api.types import is_numeric_dtype
from typing import Dict
class DataDict:
"""
This class provides functionality for mapping the columns of different data frames into a consistent namespace,
ensuring the columns to comply with the data type specified in the data dictionary and describing the data.
The data dictionary consists at least of the following columns:
* `Data Set`: Used when mapping in combination with `Field` to rename to the column to `Name`.
* `Field`: Column name of the data frame to map to `Name`.
* `Name`: Column name that is unique throughout the data dictionary.
* `Description`: Description of the column name. This can be used to provide additional information when displaying the data frame.
* `Type`: Type the column should be cast to.
* `Format`: Format to use when values need to be converted to a string representation. The format string has to be a Python format string such as `{:.0f}%`
The data dictionary can either be loaded from a CSV file or from a data frame.
"""
_data_dict_file: str
_data_dict_updated: float = None
_data_dict: pd.DataFrame
_formats: dict
_names: list
auto_reload: bool
column_names = ['Data Set', 'Field', 'Name', 'Description', 'Type', 'Format']
supported_types = ['float', 'float32', 'float64', 'int', 'int32', 'int64', 'object', 'str', 'bool', 'datetime64', 'timedelta', 'category']
stats = {'sum': 'Total', 'mean': 'Average'}
meta: object
def auto_reload(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if self.auto_reload:
self.__load()
return func(self, *args, **kwargs)
return wrapper
def __aggr(self, series: pd.Series):
funcs = self._data_dict[self._data_dict['Name'] == series.name]['Default Aggregation'].values
try:
return eval('series.' + funcs[0]) if len(funcs) == 1 and not funcs[0].isspace() else None
except:
return None
@property
def data_dict(self) -> pd.DataFrame:
"""
Data dictionary as a data frame.
"""
return self._data_dict
@property
def formats(self) -> Dict[str, str]:
"""
Dictionary that maps the columns to names to their format strings.
"""
return self._formats
def __init__(self, data_dict_file: str = None, auto_reload: bool = True, data_dict: pd.DataFrame = None):
"""
Creates the data dictionary and validates it. It can either be initialised from a CSV file or a data frame.
Args:
data_dict_file: The data dictionary file in CSV format to use to initialise the data dictionary.
auto_reload: Whether the data dictionary should automatically check for changes in the data dictionary file.
data_dict: The data dictionary as a data frame to use to initialise the data dictionary instead of the data dictionary file.
"""
if data_dict_file is not None and data_dict is not None:
raise ValueError('Parameters data_dict_file and data_dict can\'t be assigned at the same time.')
self._data_dict_file = data_dict_file
self.auto_reload = auto_reload
self.__set_data_dict(data_dict)
self.__load()
def __load(self) -> None:
"""
Loads the data dictionary from the CSV file specified during initialisation and validates it.
"""
if self._data_dict_file is None:
return
if not path.exists(self._data_dict_file):
raise ValueError(f'The data dictionary file {self._data_dict_file} does not exist.')
if self._data_dict_updated is not None and os.path.getmtime(self._data_dict_file) == self._data_dict_updated:
return
data_dict = pd.read_csv(self._data_dict_file)
self._data_dict_updated = os.path.getmtime(self._data_dict_file)
self.__set_data_dict(data_dict)
def __set_data_dict(self, data_dict: pd.DataFrame) -> None:
"""
        Sets a new data dictionary data frame and validates it.
Args:
data_dict: Specifies the data dictionary.
"""
DataDict.validate(data_dict)
self._data_dict = data_dict
if data_dict is not None:
formats = self._data_dict[['Name', 'Format']].dropna(subset=['Format'])
self._formats = pd.Series(formats['Format'].values, index=formats['Name']).to_dict()
self._names = list(self._data_dict['Name'].values)
@staticmethod
def validate(data_dict: pd.DataFrame) -> None:
"""
Validates the given data dictionary and raises a `ValueError` if the validation fails.
Args:
data_dict: The data dictionary to validate.
Returns:
Raises:
ValueError: If the given data dictionary is not valid.
"""
if data_dict is None:
return
data_dict = data_dict.copy()
# Check that all expected columns exist.
if not set(data_dict.columns) >= set(DataDict.column_names):
raise ValueError(f'The data dictionary must at least include the following column names: {DataDict.column_names}')
# Check that all types are supported Python types.
if not set(data_dict['Type'].values) <= set(DataDict.supported_types):
raise ValueError(
f'The Type column of the data dictionary contains the following unsupported types {set(data_dict["Type"].values) - set(DataDict.supported_types)}. Only the following types are supported: {DataDict.supported_types}')
# Check that names are unique.
if any(data_dict['Name'].duplicated()):
raise ValueError(f'The Name column contains the following duplicates: {data_dict["Name"][data_dict["Name"].duplicated()].values}. The names must be unique.')
# Check that dataset and field combination is unique.
data_dict = data_dict.replace('', np.nan)
data_dict['Field ID'] = data_dict['Data Set'] + '.' + data_dict['Field']
if any(data_dict['Field ID'][data_dict['Field ID'].isnull() == False].duplicated()):
raise ValueError(f'The combination of columns Data Set and Field contains the following duplicates: {data_dict["Field ID"][data_dict["Field ID"].duplicated()].values}. The combination must be unique.')
@staticmethod
def __str_to_bool(value: str) -> object:
"""
Converts the given string to a bool if the argument is a string otherwise it returns the value untouched. `yes`, `true`, `1` are considered `True`, the rest is considered `False`.
Args:
value: The value to convert to a bool.
Returns:
The converted bool if the value is a string. Otherwise the value passed in the argument.
"""
if pd.isnull(value):
return None
if not isinstance(value, str):
return value
return value.lower() in ['yes', 'true', '1']
def df(self, data_set: str = None, any_data_set: bool = False) -> pd.DataFrame:
"""
Gets the data set with the given name as a data frame.
Args:
data_set: The data set to filter by. If this value matches a value in the `Data Set` column of the data dictionary, the matching rows are returned.
If `data_set` is not specified, the entries with empty `Data Set` are returned.
any_data_set: Whether to return all data sets in the data frame.
Returns:
The data set as a data frame, index by the `Field` column.
"""
if any_data_set and data_set is not None:
raise ValueError('Either data_set can be provided or any_data_set can be True but not both.')
if data_set is None:
data_set = ''
return self._data_dict[(self._data_dict['Data Set'] == data_set) | any_data_set].set_index('Field')
@auto_reload
def remap(self, df: pd.DataFrame, data_set: str = None, ensure_cols: bool = False, strip_cols: bool = False) -> pd.DataFrame:
"""
Renames the columns in the given data frame based on based on the `Data Set` and `Field` attributes in the data dictionary to `Name`
if such a mapping found and converts the columns data to `Type`. It also reorders the columns based on the order of the data dictionary entries.
Args:
df: The data frame to remap.
data_set: The data set to use. If this value matches a value in the `Data Set` column of the data dictionary, then the corresponding names in the `Field`
column are used to rename the columns of the given data frame to the `Name` column name. If `dataset` is not specified, the values in `Field` column
that have entries with empty `Data Set` are used.
ensure_cols: Ensures all columns in the data_set are present. If the source data frame does not contain them, empty ones are created. This parameter can
only be true if data_set is specified. This is useful when the data frame to be remapped may not have all the columns if it is empty.
strip_cols: Whether to remove all columns that are not in the data set. In any case, it will leave the index untouched.
Returns:
The remapped data frame.
"""
if df is None:
raise ValueError('Parameter df not provided.')
if (data_set is None or data_set == '') and ensure_cols:
raise ValueError('Parameter data_set cannot be None or empty if ensure_cols is True.')
dd = self.df(data_set)
types_map = dd['Type'].to_dict()
types_map = {col: typ for (col, typ) in types_map.items() if col in df.columns} # Remove mapping for columns that are not present in data frame.
# Map values of str columns.
str_cols = [col for (col, typ) in types_map.items() if typ == 'str']
df[str_cols] = df[str_cols].apply(lambda col: col.map(lambda val: val if isinstance(val, str) and val != '' else None))
# Ensure that nan is represented as None so that column type conversion does not result in object types if nan is present.
df = df.replace('', np.nan)
# Map values of bool columns.
bool_cols = [col for (col, typ) in types_map.items() if typ == 'bool']
df[bool_cols] = df[bool_cols].apply(lambda col: col.map(lambda val: self.__str_to_bool(val)))
        # Treat bool and str columns separately because astype() would convert any non-empty string to True.
# Map values of non-bool, non-str columns using data type.
no_bool_str_types_map = {col: typ for (col, typ) in types_map.items() if typ not in ['bool', 'str']}
df = df.astype(no_bool_str_types_map, errors='ignore')
columns_map = dd['Name'].to_dict()
df = df.rename(columns=columns_map)
df = self.reorder(df)
if ensure_cols:
df = self.ensure_cols(df, data_set=data_set)
if strip_cols:
df = self.strip_cols(df, data_set=data_set)
return df
@auto_reload
def reorder(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Reorders the given data frame based on the order of the matching entries in the data dictionary.
Args:
df: The data frame whose columns need to be reordered.
Returns:
The reordered data frame.
"""
return df[[x for x in self._names if x in list(df.columns.values)]
+ [x for x in list(df.columns.values) if x not in self._names]]
@auto_reload
def ensure_cols(self, df: pd.DataFrame, cols: list = None, data_set: str = None) -> pd.DataFrame:
"""
        Ensures that the columns from the given data set or the given column names are present in the resulting data frame. Missing columns are added at the end.
Args:
df: The data frame to add the missing columns (if any) to.
data_set: The name of data set to use. If this value matches a value in the `Data Set` column of the data dictionary,
then `Name` column is used to identify missing columns. If `dataset` is not specified, the values in `Name` column
that have entries with empty `Data Set` are used.
cols: The column names to ensure are present in the returned data frame.
Returns:
The data frame with missing columns added to the end.
"""
if cols is not None and data_set is not None:
raise ValueError('Either the cols or the data_set arguments can be provided but not both.')
if cols is None:
cols = list(self.df(data_set)['Name'].values)
current_cols = list(df.columns.values)+list(df.index.names)
missing_cols = [v for v in cols if v not in current_cols]
return df.reindex(columns=(list(df.columns.values)+missing_cols))
@auto_reload
def strip_cols(self, df: pd.DataFrame, data_set: str = None, any_data_set: bool = False):
"""
Removes all columns that are not in the given data set from the given data frame or all columns that are not in any data set. It leaves
the index untouched.
Args:
df: The data frame to remove the columns from.
data_set: The name of the data set with columns to preserve.
any_data_set: Whether to remove all columns that are not in any data set.
Returns:
            The data frame with only the data set columns.
"""
if any_data_set and data_set is not None:
            raise ValueError('Either data_set can be provided or any_data_set can be True but not both.')
ds_cols = list(self.df(data_set, any_data_set)['Name'].values)
df_cols = [v for v in df.columns if v in ds_cols]
return df[df_cols]
@staticmethod
def add_stats(df: pd.DataFrame) -> pd.DataFrame:
"""
Adds the `Total` and `Average` of the column values as two rows at the top of the data frame.
Args:
df: The data frame to summarise.
Returns:
The data frame with the `Total` and `Average` at the top.
"""
if df is None:
raise ValueError('Parameter df is mandatory')
num_agg_map = {col: DataDict.stats.keys() for col in df if is_numeric_dtype(df[col]) and df[col].dtype != np.bool}
aggr_row = df.agg(num_agg_map).rename(DataDict.stats)
if len(df.index.names) > 1:
aggr_row = pd.concat([aggr_row], keys=[np.nan] * len(DataDict.stats.keys()), names=df.index.names[1:])
df = pd.concat([df.iloc[:0], aggr_row, df], sort=False)
# Adds the dictionary of stats to the data frame.
if not hasattr(df, 'stats'):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
df.stats = {}
df.stats = {**df.stats, **DataDict.stats}
return df
@staticmethod
def has_stats(df: pd.DataFrame):
"""
Checks whether the given data frame has stats rows added at the top of the data frame.
Args:
df: The data frame to check.
Returns:
Whether the given data frame has stats.
"""
return hasattr(df, 'stats')
def format(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Formats the data frame based on the `Format` attribute in the data dictionary.
Args:
df: The data frame to format.
Returns:
The formatted data frame.
"""
if df is None:
raise ValueError('Parameter df is mandatory')
# Necessary to define separate function instead of using lambda directly (see https://stackoverflow.com/questions/36805071/dictionary-comprehension-with-lambda-functions-gives-wrong-results)
def make_func(f: str = None):
def format_value(x):
if f is None or f == '':
return x if not pd.isnull(x) else '-'
return f.format(x) if not pd.isnull(x) else '-'
# If mean is part of the stats, then the integer numbers need to be formatted as floats because the mean of integers can be float.
if self.has_stats(df) and 'mean' in df.stats.keys() and f is not None:
f = f.replace(':d', ':.1f')
return lambda x: format_value(x)
# Assembles a dictionary with columns as key and format functions as values but only for the columns that are actually in the data frame.
formats = {col: make_func(f) for (col, f) in self._formats.items() if col in df.columns.values}
formats = {**formats, **{col: make_func() for col in set(df.columns.values) - set(self._formats.keys())}}
df = df.copy()
for col, value in formats.items():
try:
df[col] = df[col].apply(value)
except ValueError as e:
warnings.warn(f'A value in column {col} could not be formatted.\nError message: {e}')
return df
def __hash__(self):
"""
Calculates the hash value of the data dictionary by calculating the hash value of the data dictionary data frame.
Returns:
The hash value of the data dictionary.
"""
return hash(pickle.dumps(self.data_dict))
DataDict.meta = DataDict(data_dict=pd.DataFrame.from_dict(orient='index',
data={0: ['data_dict', 'data_set', 'Data Set', 'Used when mapping in combination with Field to rename to the column to Name.', 'str', '{:s}'],
1: ['data_dict', 'field', 'Field', 'Column name of the data frame to map to Name.', 'str', '{:s}'],
2: ['data_dict', 'name', 'Name', 'Column name that is unique throughout the data dictionary.', 'str', '{:s}'],
3: ['data_dict', 'description', 'Description', 'Description of the column name. This can be used to provide additional information when displaying the data frame.', 'str',
'{:s}'],
4: ['data_dict', 'type', 'Type', 'Type the column should be cast to.', 'str', '{:s}'],
5: ['data_dict', 'format', 'Format',
'Format to use when values need to be converted to a string representation. The format string has to be a Python format string such as {:.0f}%', 'str', '{:s}']},
columns=['Data Set', 'Field', 'Name', 'Description', 'Type', 'Format']))
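# A minimal usage sketch (illustrative only; the 'demo' data set and its field/column names are made up):
if __name__ == '__main__':
    _dd = DataDict(data_dict=pd.DataFrame.from_dict(orient='index',
        data={0: ['demo', 'player_id', 'Player ID', 'Identifier of the player.', 'int', '{:d}'],
              1: ['demo', 'pts', 'Points', 'Total points scored.', 'float', '{:.1f}']},
        columns=['Data Set', 'Field', 'Name', 'Description', 'Type', 'Format']))
    _df = pd.DataFrame({'player_id': ['1', '2'], 'pts': ['3.5', '7.0']})
    # remap() renames player_id -> 'Player ID', pts -> 'Points' and casts the columns to int/float.
    print(_dd.remap(_df, data_set='demo'))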
```
#### File: tests/jupyter/test_datadict_jupyter.py
```python
import unittest
import pandas as pd
import ipywidgets as widgets
from datadict.jupyter import DataDict
class TestDataDictJupyter(unittest.TestCase):
_dd = DataDict(data_dict=pd.DataFrame.from_dict(orient='index',
data={0: ['data_set_1', 'field_1', 'Name 1', 'Description 1', 'str', ''],
1: ['data_set_1', 'field_2', 'Name 2', 'Description 2', 'int', '{:d}'],
2: ['data_set_1', 'field_3', 'Name 3', 'Description 3', 'bool', '{:}'],
3: ['data_set_1', 'field_4', 'Name 4', 'Description 4', 'float', '£{:.1f}m'],
4: ['data_set_1', 'field_5', 'Name 5', 'Description 5', 'datetime64', '']},
columns=['Data Set', 'Field', 'Name', 'Description', 'Type', 'Format']))
def test_display(self):
data = [{'field_1': 'test 1', 'field_2': '1', 'field_3': 'True', 'field_4': '1.1', 'field_5': '2019-01-01', 'field_6': 'bayern', },
{'field_1': 'test 2', 'field_2': '2', 'field_3': 'FALSE', 'field_4': '1.2', 'field_5': '2019-01-02', 'field_6': 'bayern'},
{'field_1': 'test 3', 'field_2': '3', 'field_3': '', 'field_4': '', 'field_5': '', 'field_6': 'bayern'}]
df = pd.DataFrame.from_records(data)
out = self._dd.display(df)
self.assertIsInstance(out, widgets.VBox)
self.assertEqual(len(out.children), 3)
self.assertIsInstance(out.children[0], widgets.Output)
self.assertIsInstance(out.children[1], widgets.HBox)
self.assertIsInstance(out.children[1].children[0], widgets.HTML)
self.assertEqual(out.children[1].children[0].value, '3 rows x 6 columns')
self.assertIsInstance(out.children[2], widgets.Accordion)
def test_display_dont_show_footer(self):
data = [{'field_1': 'test 1', 'field_2': '1', 'field_3': 'True', 'field_4': '1.1', 'field_5': '2019-01-01', 'field_6': 'bayern', },
{'field_1': 'test 2', 'field_2': '2', 'field_3': 'FALSE', 'field_4': '1.2', 'field_5': '2019-01-02', 'field_6': 'bayern'},
{'field_1': 'test 3', 'field_2': '3', 'field_3': '', 'field_4': '', 'field_5': '', 'field_6': 'bayern'}]
df = pd.DataFrame.from_records(data)
out = self._dd.display(df, footer=False)
self.assertIsInstance(out, widgets.VBox)
self.assertEqual(len(out.children), 2)
self.assertIsInstance(out.children[0], widgets.Output)
self.assertIsInstance(out.children[1], widgets.Accordion)
def test_display_dont_show_descriptions(self):
data = [{'field_1': 'test 1', 'field_2': '1', 'field_3': 'True', 'field_4': '1.1', 'field_5': '2019-01-01', 'field_6': 'bayern', },
{'field_1': 'test 2', 'field_2': '2', 'field_3': 'FALSE', 'field_4': '1.2', 'field_5': '2019-01-02', 'field_6': 'bayern'},
{'field_1': 'test 3', 'field_2': '3', 'field_3': '', 'field_4': '', 'field_5': '', 'field_6': 'bayern'}]
df = pd.DataFrame.from_records(data)
out = self._dd.display(df, descriptions=False)
self.assertIsInstance(out, widgets.VBox)
self.assertEqual(len(out.children), 2)
self.assertIsInstance(out.children[0], widgets.Output)
self.assertIsInstance(out.children[1], widgets.HBox)
self.assertIsInstance(out.children[1].children[0], widgets.HTML)
self.assertEqual(out.children[1].children[0].value, '3 rows x 6 columns')
``` |
{
"source": "17843/gtin",
"score": 3
} |
#### File: 17843/gtin/is_gtin.py
```python
def is_gtin(codestr):
""" Tests a string input as to whether it's a valid GTIN product code - disregarding the length of the string """
try:
        d = [int(c) for c in codestr][:-1]
        # The 3,1 weights must end on 3 for the digit immediately left of the check digit,
        # so take the last len(d) weights of the sequence.
        n = (3,1,3,1,3,1,3,1,3,1,3,1,3,1,3,1,3)[-len(d):]
        checksum = 0
        for a1, a2 in zip(d, n):
            checksum += a1 * a2
        return abs(checksum % -10) == int(codestr[-1])
except:
return False
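# A quick sanity check (illustrative only):
# is_gtin("4006381333931")  # the weighted digit sum is 89, so the check digit is 1 -> True
# is_gtin("4006381333932")  # wrong check digit -> False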
``` |
{
"source": "1786016767jjk/pj",
"score": 2
} |
#### File: apps/front/urls.py
```python
from flask.views import MethodView
from apps.front.forms import SendSmsCodeForm,SignupFrom,FindpwdFrom,SendCodeForm,AddPostForm,SigninFrom
from flask import Blueprint,make_response
from flask import render_template,session
from flask import views,request,jsonify
import string,random
from apps.common.baseResp import *
import json
from dysms_python.demo_sms_send import send_sms
from apps.common.captcha.xtcaptcha import Captcha
from io import BytesIO
from apps.common.memcachedUtil import saveCache,delete,getCache
from apps.front.models import *
from apps.common.models import Banner,Board,Post
from functools import wraps
from config import FRONT_USER_ID
from flask import redirect
from flask import url_for
#
bp = Blueprint('front',__name__)
def lonigDecotor(func):
    """Decorator that restricts a view to logged-in users."""
    @wraps(func)
    def inner(*args,**kwargs):
        if not session.get(FRONT_USER_ID,None): # not logged in
return redirect(location=url_for("front.signin"))
else:
r = func(*args,**kwargs)
return r
return inner
@bp.route("/")
def loginView():
    # Fetch the carousel banners (top 4 by priority)
banners = Banner.query.order_by(Banner.priority.desc()).limit(4)
board = Board.query.all()
posts=Post.query.all()
context = {
'banners':banners,
'boards':board,
'posts':posts
}
return render_template("front/index.html",**context)
class Signup(MethodView):
def get(self):
        # Which page the signup button was clicked from (e.g. Referer: http://127.0.0.1:9000/signin/)
        location = request.headers.get("Referer")
        if not location : # location is empty when the signup URL is opened directly
location = '/'
context = {
'location':location
}
return render_template("front/signup.html",**context)
class Signup(MethodView):
def get(self):
return render_template("front/signup.html")
def post(self):
fm = SigninFrom(formdata=request.form)
if fm.validate():
            # Save this user to the database
u = FrontUser(telephone=fm.telephone.data,
username=fm.username.data,
password=fm.password.data)
db.session.add(u)
db.session.commit()
            delete(fm.telephone.data)  # registration succeeded, delete the SMS verification code
return jsonify(respSuccess("注册成功,真不容易啊"))
else:
return jsonify(respParamErr(fm.err))
@bp.route("/send_sms_code/",methods=['post'])
def sendSMSCode():
fm = SendSmsCodeForm(formdata=request.form)
if fm.validate():
        # Generate the verification code
        source = string.digits
        source = ''.join(random.sample(source, 4))
        # Send the verification code
        r = send_sms(phone_numbers=fm.telephone.data,smscode=source) #b'{"Message":"OK","RequestId":"26F47853-F6CD-486A-B3F7-7DFDCE119713","BizId":"102523637951132428^0","Code":"OK"}'
        if json.loads(r.decode("utf-8"))['Code'] == 'OK':
            # Store it in the cache
            saveCache(fm.telephone.data,source,30*60)
            return jsonify(respSuccess("短信验证码发送成功,请查收"))
        else: # sending failed
            return jsonify(respParamErr("请检查网络"))
else:
return jsonify(respParamErr(fm.err))
@bp.route("/img_code/")
def ImgCode():
    # Generate a short random string
    # Draw the string onto an image
    # Use a special font
    # Add interference lines
    # Add noise dots
    text,img = Captcha.gene_code() # generate the captcha via the helper class
    print(text)
    out = BytesIO() # initialise a stream object
    img.save(out, 'png') # save as png
    out.seek(0) # read from the beginning of the stream
    saveCache(text,text,60)
    resp = make_response(out.read()) # build a response from the stream
    resp.content_type = "image/png" # set the content-type response header
return resp
class Singnin(MethodView):
def get(self):
return render_template("front/signin.html")
def post(self):
fm=SigninFrom(formdata=request.form)
if fm.validate():
            # Look up the user by their phone number
user=FrontUser.query.filter(FrontUser.telephone==fm.telephone.data).first()
if not user:
return jsonify(respParamErr("未注册"))
            # Compare the passwords
r=user.checkPwd(fm.password.data)
if r :
return jsonify(respSuccess("登录成功"))
else:
return jsonify(respParamErr("密码错误"))
else:
return jsonify(respParamErr(fm.err))
class Addpost(views.MethodView):
decorators = [lonigDecotor]
def get(self):
        # Query all the boards
board = Board.query.all()
context = {
"boards": board
}
return render_template("front/addpost.html",**context)
def post(self):
fm = AddPostForm(formdata=request.form)
if fm.validate() :
            # Save it to the database
user_id = session[FRONT_USER_ID]
post = Post(title=fm.title.data,content=fm.content.data,
board_id=fm.boarder_id.data,user_id=user_id)
db.session.add(post)
db.session.commit()
return jsonify(respSuccess("发布成功"))
else:
print(respParamErr(fm.err))
return jsonify(respParamErr(fm.err))
bp.add_url_rule("/addpost/",endpoint='addpost',view_func=Addpost.as_view('addpost'))
bp.add_url_rule("/signin/",endpoint='signin',view_func=Singnin.as_view('signin'))
bp.add_url_rule("/signup/",endpoint='signup',view_func=Signup.as_view('signup'))
# SMS verification code
# Register an account on Aliyun
# Apply for an AccessKey
# Apply for a signature and a message template
# Download the Python demo
# Modify demo_sms_send.py in the demo
# Call it from the project
# Image captcha
# 1. Generate the captcha image with the PIL library
# 2. Return it to the client
# 3. Refresh the captcha by changing the img src value with js
``` |
{
"source": "1786016767jjk/qwe",
"score": 2
} |
#### File: apps/goods/urls.py
```python
from django.urls import path,include
from goods.views import BannerView
from goods.views import GoodsView,CategaryView
# goods_list = GoodsView.as_view({ # one API endpoint per resource
# 'get': 'list',
# })
#
from rest_framework.routers import DefaultRouter
from goods.views import GoodsView
router = DefaultRouter() # create a router
router.register(r'lst', GoodsView) # viewset
router.register(r"category",CategaryView)
router.register(r'banner',BannerView)
# router.register(r"banner",BannerView)
# router.register(r"hot_searchs",HotSearchsView)
# def createToken(request):
# from user.models import User
# from rest_framework.authtoken.models import Token
#
# for user in User.objects.all():
# Token.objects.get_or_create(user=user)
urlpatterns = [
# path('lst/', goods_list, name='goods_list'),
path('', include(router.urls)),
# path('token',createToken)
]
```
#### File: apps/trade/serializer.py
```python
from rest_framework import serializers
from goods.serializer import GoodsSerializer
from .models import ShoppingCart,OrderGoods,OrderInfo
from goods.models import Goods
# Get all shopping cart records
# Get the goods data for a given cart id
# Update the cart (update the quantity)
# The stock changes accordingly
# Delete goods from the cart by cart id
# The goods stock needs to be increased
# Add goods to the cart
# Check whether the goods are already in the cart; if so, increase the quantity, otherwise add a new record
# The goods stock needs to be decreased
class ShoppingCartSerializers(serializers.Serializer):
user = serializers.HiddenField(
        default=serializers.CurrentUserDefault() # the current user
)
nums = serializers.IntegerField(required=True,min_value=1,
error_messages={
'required':'nums是必须写',
'min_value':'最小为1'
})
goods = serializers.PrimaryKeyRelatedField(many=False,queryset=Goods.objects.all())
class Meta:
model = ShoppingCart
fields = '__all__'
    # def create(self, validated_data):
    #     # on creation
    #
    # def update(self, instance, validated_data):
    #     # on update and delete
# Shopping cart
class ShoppingCartDetailSerializers(serializers.ModelSerializer):
goods = GoodsSerializer()
class Meta:
model = ShoppingCart
fields = '__all__'
class OrderInfoSerializers(serializers.ModelSerializer):
user = serializers.HiddenField(default=serializers.CurrentUserDefault())
    # The order number must be unique
    order_sn = serializers.CharField(read_only=True)
    # Used by WeChat Pay
    nonce_str = serializers.CharField(read_only=True)
    # Alipay transaction number
    trade_no = serializers.CharField(read_only=True)
    # Payment status
    pay_status = serializers.CharField(read_only=True)
    pay_time = serializers.DateTimeField(read_only=True)
    # Payment URL
    alipay_url = serializers.SerializerMethodField(read_only=True)
    # Method field resolvers must be named get_<field name>
def get_alipay_url(self, obj):
from trade.util.aliPay import AliPay
alipay = AliPay(
            # appid value from the Alipay sandbox
            appid="2016092000553317",
            # notify_url is the asynchronous callback url
            app_notify_url="http://192.168.3.11:8000/alipay/return/",
            # path to our own merchant private key
            app_private_key_path="apps/trade/keys/siyao.txt",
            # Alipay's public key
            alipay_public_key_path="apps/trade/keys/zhifugongyao.txt", # the Alipay public key, used to verify messages returned by Alipay, not your own public key
            # when debug is True the sandbox urls are used instead of the production ones
            debug=True, # defaults to False
return_url="http://192.168.3.11:8000/alipay/return/"
)
url = alipay.direct_pay(
            # Order title
            subject=obj.order_sn,
            # Order number generated by our own merchant system
            out_trade_no=obj.order_sn,
            # Order amount
            total_amount=obj.order_mount,
            # Page redirected to after a successful payment (return_url is the synchronous callback url)
            return_url="http://192.168.3.11:8000/alipay/return/"
)
        # Append the generated request string to the gateway url
re_url = "https://openapi.alipaydev.com/gateway.do?{data}".format(data=url)
return re_url
class Meta:
model = OrderInfo
fields = '__all__'
def generate_order_sn(self):
        # Generate the order number:
        # current timestamp + user id + random number
from random import Random
import time
random_ins = Random()
order_sn = "{time_str}{userid}{ranstr}".format(time_str=time.strftime("%Y%m%d%H%M%S"),
userid=self.context["request"].user.id,
ranstr=random_ins.randint(10, 99))
return order_sn
    # order_sn format: current timestamp + user id + a 2-digit random number
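    # For example (illustrative values): for user id 7 on 2020-01-01 12:30:55 with random part 42,
    # the generated order_sn would be "20200101123055" + "7" + "42" == "20200101123055742".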
def create(self, validated_data):
validated_data['order_sn'] = self.generate_order_sn() # 必须是唯一
return OrderInfo.objects.create(**validated_data)
class OrderGoodsSerializers(serializers.ModelSerializer):
class Meta:
model = OrderGoods
fields = '__all__'
``` |
{
"source": "1786939789/tools",
"score": 3
} |
#### File: 1786939789/tools/main.py
```python
import os
from tqdm import tqdm
from xml_util import xml_to_json
from file_util import check_dir
from image_util import plot_dir_box
def temporary_storage():
""" temporary storage
Args:
"""
# add bboxes onto images
txt_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/original-detect-result/face/"
images_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/keyframe/"
save_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/visualization/face/"
pbar = tqdm(os.listdir(images_dir))
for image_dir in pbar:
pbar.set_description("Processing {}/{}".format(images_dir, image_dir))
if not os.path.isdir(os.path.join(images_dir, image_dir)):
continue
txt_file = os.path.join(txt_dir, image_dir+".txt")
check_dir(os.path.join(save_dir, image_dir))
plot_dir_box(os.path.join(images_dir, image_dir), os.path.join(save_dir, image_dir), txt_file)
# # add bboxes onto images
# json_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/json_final/"
# images_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/keyframe/"
# save_dir = "E:/Study/data/StoryLine/TA2/kairos_data/LDC2020E33_KAIROS/visualization/face/"
# pbar = tqdm(os.listdir(images_dir))
# for image_dir in pbar:
# pbar.set_description("Processing {}/{}".format(images_dir, image_dir))
# if not os.path.isdir(os.path.join(images_dir, image_dir)):
# continue
# json_file = os.path.join(json_dir, image_dir+".json")
# check_dir(os.path.join(save_dir, image_dir))
# with open(json_file, "r") as f:
# json_data = json.load(f)
# image_list = os.listdir(os.path.join(images_dir, image_dir))
# image_list.sort()
# for i, image in enumerate(image_list):
# suffix = os.path.splitext(image)[-1]
# if suffix not in [".jpg", "jpeg", "png"]:
# continue
# img = cv2.imread(os.path.join(images_dir, image_dir, image))
# for entity in json_data["entities"]:
# if "label" in entity and entity["label"] == "face":
# frame_index = int(entity["id"].split("-")[-3][1:])
# if frame_index == i:
# plot_one_box(img, entity["bbox"], entity["id"], color=(0, 0, 255))
# check_dir(os.path.join(save_dir, image_dir))
# cv2.imwrite(os.path.join(save_dir, image_dir, image), img)
if __name__ == "__main__":
# add bboxes onto images
txt_dir = "F:/Data/kairos/data/scenario_data/original-detect-result/ocr-2/"
images_dir = "F:/Data/kairos/data/scenario_data/image/"
save_dir = "F:/Data/kairos/data/scenario_data/visualization-ocr/"
# category_list = ["obj-v", "obj-v", "obj-v", "obj-v", "obj-v", "obj-v"]
pbar = tqdm(os.listdir(images_dir))
for image_dir in pbar:
pbar.set_description("Processing {}/{}".format(images_dir, image_dir))
if not os.path.isdir(os.path.join(images_dir, image_dir)):
continue
txt_file = os.path.join(txt_dir, image_dir+".txt")
check_dir(os.path.join(save_dir, image_dir))
plot_dir_box(os.path.join(images_dir, image_dir), os.path.join(save_dir, image_dir), txt_file)
``` |
{
"source": "17876zjc/QQ-Group-Repeater",
"score": 3
} |
#### File: QQ-Group-Repeater/module/DueHelper.py
```python
import json
import time
import requests
class DueHelper:
def __init__(self, token):
self.sess = requests.Session()
self.baseurl = 'https://umjicanvas.com/api/v1'
self.token = token
self.courses = self.getCourseID()
def getCourseID(self):
res = {}
page = 1
while True:
url = f"{self.baseurl}/courses?" + \
f"access_token={self.token}&" + \
f"page={page}"
courses = self.sess.get(url).json()
if not courses:
break
for course in courses:
# print(course)
res[course['id']] = course.get('course_code', course.get('name'))
page += 1
return res
def getDue(self):
re = []
for courseID, courseName in self.courses.items():
url = f"{self.baseurl}/courses/{courseID}/assignments" + \
f"?access_token={self.token}&bucket=upcoming"
dues = self.sess.get(url).json()
for due in dues:
if type(due) != dict or due.get('due_at') is None:
continue
timeStamp = time.mktime(
time.strptime(due['due_at'], "%Y-%m-%dT%H:%M:%SZ"))
re.append([courseName, due['name'].strip(), timeStamp])
return sorted(re, key=lambda item: item[2])
def getDueStr(self):
re = []
dues = self.getDue()
length = str(max([len(due[1]) for due in dues]))
fmt = "{:6s} {:" + length + "s} Due Time"
re.append(fmt.format("ID", "Name"))
fmt = "{:6s} {:" + length + "s} {}"
for due in dues:
timeStr = time.strftime("%m/%d %H:%M",
time.localtime(due[2] + 8 * 60 * 60))
re.append(fmt.format(due[0], due[1], timeStr))
return '\n'.join(re)
# print(DueHelper(token).getDueStr())
```
#### File: 17876zjc/QQ-Group-Repeater/tenhou2.py
```python
from numpy import double
from sympy import false, true
from util import load_json
import requests
import json
import time
import urllib.parse
url = "https://nodocchi.moe/api/listuser.php?name="
urlrank = "https://nodocchi.moe/s/ugr/***.js"
levelmap = {
0: {'name': '新人', 'initscore': 0, 'maxscore': 20, 'haslower': False, 'losescore': [0,0]},
1: {'name': '9级', 'initscore': 0, 'maxscore': 20, 'haslower': False, 'losescore': [0,0]},
2: {'name': '8级', 'initscore': 0, 'maxscore': 20, 'haslower': False, 'losescore': [0,0]},
3: {'name': '7级', 'initscore': 0, 'maxscore': 20, 'haslower': False, 'losescore': [0,0]},
4: {'name': '6级', 'initscore': 0, 'maxscore': 40, 'haslower': False, 'losescore': [0,0]},
5: {'name': '5级', 'initscore': 0, 'maxscore': 60, 'haslower': False, 'losescore': [0,0]},
6: {'name': '4级', 'initscore': 0, 'maxscore': 80, 'haslower': False, 'losescore': [0,0]},
7: {'name': '3级', 'initscore': 0, 'maxscore': 100, 'haslower': False, 'losescore': [0,0]},
8: {'name': '2级', 'initscore': 0, 'maxscore': 100, 'haslower': False, 'losescore': [10,15]},
9: {'name': '1级', 'initscore': 0, 'maxscore': 100, 'haslower': False, 'losescore': [20,30]},
10: {'name': '初段', 'initscore': 200, 'maxscore': 400, 'haslower': True, 'losescore': [30,45]},
11: {'name': '二段', 'initscore': 400, 'maxscore': 800, 'haslower': True, 'losescore': [40,60]},
12: {'name': '三段', 'initscore': 600, 'maxscore': 1200, 'haslower': True, 'losescore': [50,75]},
13: {'name': '四段', 'initscore': 800, 'maxscore': 1600, 'haslower': True, 'losescore': [60,90]},
14: {'name': '五段', 'initscore': 1000, 'maxscore': 2000, 'haslower': True, 'losescore': [70,105]},
15: {'name': '六段', 'initscore': 1200, 'maxscore': 2400, 'haslower': True, 'losescore': [80,120]},
16: {'name': '七段', 'initscore': 1400, 'maxscore': 2800, 'haslower': True, 'losescore': [90,135]},
17: {'name': '八段', 'initscore': 1600, 'maxscore': 3200, 'haslower': True, 'losescore': [100,150]},
18: {'name': '九段', 'initscore': 1800, 'maxscore': 3600, 'haslower': True, 'losescore': [110,165]},
19: {'name': '十段', 'initscore': 2000, 'maxscore': 4000, 'haslower': True, 'losescore': [120,180]},
20: {'name': '天凤', 'initscore': 2200, 'maxscore': 100000, 'haslower': False, 'losescore': [0,0]}
}
ptchange = {
'4': [{
'0': (20, 10, 0),
'1': (40, 10, 0),
'2': (50, 20, 0),
'3': (60, 30, 0)
},
{
'0': (30, 15, 0),
'1': (60, 15, 0),
'2': (75, 30, 0),
'3': (90, 45, 0)
}],
'old4': {
'0': (30, 0, 0),
'1': (40, 10, 0),
'2': (50, 20, 0),
'3': (60, 30, 0)
}
}
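# Reading the tables above (worked example): a 四段 player (levelmap index 13) finishing 1st in a
# 特上 (playerlevel 2) hanchan (playlength 2) gains ptchange['4'][1]['2'][0] == 75 pt, while
# finishing 4th costs levelmap[13]['losescore'][1] == 90 pt.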
def getRank(list,name):
currank = currpt = 0
lasttime = thistime = 0
for i in list:
if lasttime == 0:
lasttime = thistime = int(i['starttime'])
else:
thistime = int(i['starttime'])
if (thistime-lasttime) > 60*60*24*180 and currank < 16:
currank = currpt = 0
lasttime = thistime
if((i['sctype'] == "b" or i['sctype'] == "c") and i['playernum'] == "4"):
lv = int(i['playerlevel'])
len = int(i['playlength'])
pt = 0
for j in range(1,5):
if i['player'+str(j)] == str(name):
pt = double(i['player'+str(j)+'ptr'])
break
rank = 1
for j in range(1,5):
if double(i['player'+str(j)+'ptr']) > pt:
rank = rank+1
ptDelta = 0
flag = true
if(lv == 0 and len == 1):
t = time.localtime(thistime)
if t.tm_year<=2017:
if t.tm_mon<=10:
if t.tm_mday <= 22 or (t.tm_mday == 23 and t.tm_hour <23):
flag = false
if rank == 1:
ptDelta = 30
elif rank == 2 or ptDelta == 3:
ptDelta = 0
else:
ptDelta = 0 - levelmap[currank]['losescore'][len-1]
if flag == true:
if rank == 4:
ptDelta = 0 - levelmap[currank]['losescore'][len-1]
else:
ptDelta = ptchange['4'][len-1][str(lv)][rank-1]
#print(ptDelta)
currpt = currpt + ptDelta
if(currpt >= levelmap[currank]['maxscore']):
currank = currank + 1
currpt = levelmap[currank]['initscore']
elif(currpt < 0 ):
if(levelmap[currank]['haslower'] == True):
currank = currank - 1
currpt = levelmap[currank]['initscore']
else:
currpt = 0
if currank==20:
currpt = 2200
return (currank,currpt)
def getinfo(name):
maxrank = currank = 0
maxpt = currpt = levelmap[currank]['initscore']
position = [0,0,0,0]
tar = url+urllib.parse.quote(str(name))
r = requests.get(tar)
res = json.loads(r.text)
if(res == False):
return "没有找到该玩家!"
lasttime = thistime = 0
for i in res['list']:
if lasttime == 0:
lasttime = thistime = int(i['starttime'])
else:
thistime = int(i['starttime'])
if (thistime-lasttime) > 60*60*24*180 and currank < 16:
maxrank = maxpt = currank = currpt = 0
position = [0,0,0,0]
lasttime = thistime
if((i['sctype'] == "b" or i['sctype'] == "c") and i['playernum'] == "4"):
lv = int(i['playerlevel'])
len = int(i['playlength'])
pt = 0
for j in range(1,5):
if i['player'+str(j)] == str(name):
pt = double(i['player'+str(j)+'ptr'])
break
rank = 1
for j in range(1,5):
if double(i['player'+str(j)+'ptr']) > pt:
rank = rank+1
position[rank-1] = position[rank-1] + 1
ptDelta = 0
flag = true
if(lv == 0 and len == 1):
t = time.localtime(thistime)
if t.tm_year<=2017:
if t.tm_mon<=10:
if t.tm_mday <= 22 or (t.tm_mday == 23 and t.tm_hour <23):
flag = false
if rank == 1:
ptDelta = 30
elif rank == 2 or ptDelta == 3:
ptDelta = 0
else:
ptDelta = 0 - levelmap[currank]['losescore'][len-1]
if flag == true:
if rank == 4:
ptDelta = 0 - levelmap[currank]['losescore'][len-1]
else:
ptDelta = ptchange['4'][len-1][str(lv)][rank-1]
#print(ptDelta)
currpt = currpt + ptDelta
if(currpt >= levelmap[currank]['maxscore']):
currank = currank + 1
currpt = levelmap[currank]['initscore']
#print("\t升段至 "+levelmap[currank]['name'])
elif(currpt < 0 ):
if(levelmap[currank]['haslower'] == True):
currank = currank - 1
currpt = levelmap[currank]['initscore']
#print("\t降段至 "+levelmap[currank]['name'])
else:
currpt = 0
if currank > maxrank or (currank == maxrank and currpt > maxpt):
maxrank = currank
maxpt = currpt
if currank==20:
maxpt = currpt = 2200
maxrank = 20
ans = (name+"\n当前段位: "+levelmap[currank]['name']+" "+str(currpt)+"pt")
if (currank == maxrank and currpt == maxpt):
ans = ans + "★"
ans = ans + ("\n历史最高: "+levelmap[maxrank]['name']+" "+str(maxpt)+"pt")
if ("4" in res["rate"]):
ans = ans+("\n推定R值: R"+str(res["rate"]["4"]))
tarrank = str(urlrank).replace("***",name)
r1 = requests.get(tarrank)
res1 = json.loads(r1.text)
if "4" in res1:
ans = ans+"\n段位排名: "+str(res1['4']['graderank'])+" 名"
gamenum = position[0]+position[1]+position[2]+position[3]
ans = ans+"\n\n总计对战: "+str(gamenum)+ " 场\n"
ans = ans+"一位: "+str(position[0])+ " 场\t"+ str(round(float(position[0]/gamenum)*100,2))+"%\n"
ans = ans+"二位: "+str(position[1])+ " 场\t"+ str(round(float(position[1]/gamenum)*100,2))+"%\n"
ans = ans+"三位: "+str(position[2])+ " 场\t"+ str(round(float(position[2]/gamenum)*100,2))+"%\n"
ans = ans+"四位: "+str(position[3])+ " 场\t"+ str(round(float(position[3]/gamenum)*100,2))+"%\n"
ans = ans+"平均顺位: "+str(round((position[0]*1+position[1]*2+position[2]*3+position[3]*4)/gamenum,3))
return ans
```
#### File: 17876zjc/QQ-Group-Repeater/wgsche.py
```python
from itertools import count
import coolq
import asyncio
import json
from Repeater import aioGet
from Bot import Bot
import wg
import time
import requests
url = "https://nodocchi.moe/s/wg.js"
wgurl = "http://tenhou.net/0/?wg="
async def wgSche():
print("In wg checking")
with open("/root/QQ/QQ-Group-Repeater/wglist.json",'r',encoding='utf-8') as load_f:
load_dict = json.load(load_f)
r = requests.get(url)
res = json.loads(r.text)
checkedname = []
for i in res:
count = -1
for j in i["players"]:
count = count+1
for k in load_dict:
if (k['id'] not in checkedname) and (k['id'] == j['name']) and (i["info"]["id"] != k["currgame"]):
checkedname.append(k['id'])
k["currgame"] = i["info"]["id"]
if (i["info"]["starttime"] not in k["recentgame"]):
k["recentgame"].append(i["info"]["starttime"])
with open("/root/QQ/QQ-Group-Repeater/wglist.json",'w',encoding='utf-8') as f:
json.dump(load_dict, f,ensure_ascii=False)
msg = k['id']+" 正在乱杀, 快来围观:\n"
if (i["info"]["playernum"] == 4):
msg = msg + "四"
elif (i["info"]["playernum"] == 3):
msg = msg + "三"
if (i["info"]["playerlevel"] == 2):
msg = msg + "特"
elif (i["info"]["playerlevel"] == 3):
msg = msg + "鳳"
if (i["info"]["playlength"] == 1):
msg = msg + "东"
elif (i["info"]["playlength"] == 2):
msg = msg + "南"
if (i["info"]["kuitanari"] == 1):
msg = msg + "喰"
if (i["info"]["akaari"] == 1):
msg = msg + "赤"
if (i["info"]["rapid"] == 1):
msg = msg + "速"
t = time.localtime(i["info"]["starttime"])
th = t.tm_hour
tm = t.tm_min
if th < 10:
th = "0" + str(th)
else:
th = str(th)
if tm < 10:
tm = "0" + str(tm)
else:
tm = str(tm)
msg = msg + " " + str(th)+":" + str(tm)+"\n"
msg = msg + wgurl + i["info"]["id"]+"&tw="+str(count)+"\n"
msg = msg + (i["players"][0]["name"]+ " " + i["players"][1]["name"]+" "
+ i["players"][2]["name"])
if (i["info"]["playernum"] == 4):
msg = msg + " " + i["players"][3]["name"]
for group_id in k["groupid"]:
await coolq.bot.send({'group_id': group_id}, message=msg)
time.sleep(1)
loop = asyncio.get_event_loop()
result = loop.run_until_complete(wgSche())
loop.close()
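# The structure of wglist.json is inferred from the fields accessed above; a plausible entry
# (illustrative only) looks like:
#
#   [{"id": "tenhou_player_name", "currgame": "", "recentgame": [], "groupid": [123456789]}]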
``` |
{
"source": "17891691876/HupuTest",
"score": 2
} |
#### File: HupuTest/businessView/login_View.py
```python
import logging
from common.common_fun import Common,By
from common.desired_caps import appium_desired
from selenium.common.exceptions import NoSuchElementException
import time
class LoginView(Common):
usermore = (By.ID,"com.hupu.games:id/btn_mytab")
login_button = (By.ID,"com.hupu.games:id/bt_quick_other")
mobile_login_new =(By.ID,"com.hupu.games:id/mobile_login_new")
acount_login = (By.ID,"com.hupu.games:id/bt_accout_login")
username_type = (By.ID,"com.hupu.games:id/username_text")
password_type = (By.ID,"<PASSWORD>:id/password_text")
login_submit = (By.ID,"com.hupu.games:id/bt_submit")
def login_action(self,username,password):
logging.info(">>>>>>>>start login<<<<<<<<<")
time.sleep(5)
self.driver.find_element(*self.usermore).click()
try:
self.driver.find_element(*self.mobile_login_new).click()
except NoSuchElementException:
pass
else:
self.driver.find_element(*self.login_button).click()
self.driver.find_element(*self.mobile_login_new).click()
self.driver.find_element(*self.acount_login).click()
#self.driver.find_element(*self.login_button).click()
self.driver.find_element(*self.username_type).send_keys(username)
self.driver.find_element(*self.password_type).send_keys(password)
self.driver.find_element(*self.login_submit).click()
if __name__ == '__main__':
driver=appium_desired()
l=LoginView(driver)
l.login_action('17891691876','fuyangdi123')
```
#### File: HupuTest/test_case/test_startAd_case.py
```python
from common.myunit import StartEnd
import logging
from businessView.startPage import start_ad
from businessView.navigationPage import navigation_button
import unittest
class startAdCase(StartEnd,unittest.TestCase):
def test_isexist_ad(self):
start_ad2 = start_ad(self.driver)
#应该有广告时
self.assertTrue(start_ad2.check_ad_isexist())
# 应该没有广告时
#self.assertFalse(start_ad2.check_ad_isexist())
def test_clickSkip(self):
start_ad2 = start_ad(self.driver)
navigation_button2 = start_ad2.check_adSkip()
if navigation_button2 is False :
logging.info("跳过失败了")
else:
self.assertTrue(navigation_button2.isExistnews_button())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "179416326/synthetic-tm-generator",
"score": 2
} |
#### File: 179416326/synthetic-tm-generator/generate.py
```python
from __future__ import print_function
import yaml
import json
import numpy as np
import sys
import random
import argparse
from subprocess import call
import tempfile
import re
import os
from pyomo.environ import *
from pyomo.opt import SolverFactory
import networkx as nx
from matplotlib.cm import autumn_r, hot_r
import matplotlib.pyplot as plt
"""Generates nb_nodes * (nb_nodes - 1) flow rates according to the Gravity model.
Args:
nb_nodes: Number of nodes in the network.
total_traffic: Total traffic going through the network, in Mbps.
Returns:
The flow rates as a matrix indexed by node numbers. Cells in the diagonal are null.
"""
def generate_tm(nb_nodes, total_traffic):
mean = 0.086
# rate = 1 / mean
tin = np.random.exponential(mean, nb_nodes)
tout = np.random.exponential(mean, nb_nodes)
tm = np.zeros((nb_nodes, nb_nodes))
sum_tin = np.sum(tin)
sum_tout = np.sum(tout)
for i in range(nb_nodes):
for j in range(nb_nodes):
if i != j:
tm[i][j] = total_traffic * tin[i] / sum_tin * tout[j] / sum_tout
return tm
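# In other words, each entry of the returned matrix follows the gravity model used above:
#   tm[i][j] = total_traffic * (tin[i] / sum(tin)) * (tout[j] / sum(tout))
# with tin and tout drawn independently from an exponential distribution with mean 0.086.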
"""Defines the variables, the constraints, and the objective for use by Pyomo.
Args:
nodes: List of node IDs.
links: A dictionary of link capacities (in Mbps) indexed by link's edges.
od_pairs: List of OD pair (as tuples).
flow_rates: List of flow rates, in Mbps.
Returns:
The Pyomo model.
"""
def define_model(nodes, links, od_pairs, flow_rates):
L = range(len(flow_rates))
model = ConcreteModel()
model.mapping = Var(od_pairs, L, within=Binary)
model.pair_rates = Var(od_pairs, within=NonNegativeReals)
model.link_rates = Var(links.keys(), od_pairs, within=NonNegativeReals)
model.z = Var(within=NonNegativeReals)
def max_link_load(model):
return model.z
model.obj = Objective(rule=max_link_load, sense=minimize)
def single_rate_per_pair(model, k1, k2):
return sum(model.mapping[k1, k2, l] for l in L) == 1
model.nine = Constraint(od_pairs, rule=single_rate_per_pair)
def single_pair_per_rate(model, l):
return sum(model.mapping[k1, k2, l] for (k1, k2) in od_pairs) == 1
model.ten = Constraint(L, rule=single_pair_per_rate)
def compute_pair_rates(model, k1, k2):
return model.pair_rates[k1, k2] == sum(model.mapping[k1, k2, l] * flow_rates[l] for l in L)
model.eleven = Constraint(od_pairs, rule=compute_pair_rates)
def flow_conservation(model, u_, k1, k2):
if u_ == k1:
return sum(model.link_rates[v, u, k1, k2] for (v, u) in links.keys() if u_ == u) - sum(model.link_rates[u, v, k1, k2] for (u, v) in links.keys() if u_ == u) == -model.pair_rates[k1, k2]
if u_ == k2:
return sum(model.link_rates[v, u, k1, k2] for (v, u) in links.keys() if u_ == u) - sum(model.link_rates[u, v, k1, k2] for (u, v) in links.keys() if u_ == u) == model.pair_rates[k1, k2]
return sum(model.link_rates[v, u, k1, k2] for (v, u) in links.keys() if u_ == u) - sum(model.link_rates[u, v, k1, k2] for (u, v) in links.keys() if u_ == u) == 0
model.twelve = Constraint(nodes, od_pairs, rule=flow_conservation)
def compute_max_link_load(model, u, v):
return model.z >= (sum(model.link_rates[u, v, k1, k2] for (k1, k2) in od_pairs) / links[u, v])
model.thirteen = Constraint(links.keys(), rule=compute_max_link_load)
return model
"""Assigns flow rates to the given network using the ILP method.
Uses GLPK as the solver and Pyomo as the Python interface.
Args:
data: The dictionary containing the topology and the node capacities.
tm: The matrix of generated flow rates.
mipgap: The mipgap argument for the GLPK solver.
Returns:
    0 on success, or None if the ILP problem is infeasible.
"""
def assign_flow_rates_ilp(data, tm, mipgap):
nodes = [node['id'] for node in data['nodes']]
links = {}
for link in data['links']:
links[(link['source'], link['destination'])] = link['capacity']
od_pairs = []
flow_rates = []
for i in range(1, len(nodes) + 1):
for j in range(1, len(nodes) + 1):
if i != j:
flow_rates.append(int(round(tm[i - 1, j - 1])))
od_pairs.append((i, j))
model = define_model(nodes, links, od_pairs, flow_rates)
solver = SolverFactory('glpk')
solver.set_options("mipgap=%f" % mipgap)
results = solver.solve(model)
print(results)
if str(results['Solver'][0]['Termination condition']) == 'infeasible':
return None
model.solutions.load_from(results)
for link in data['links']:
for (s, d) in od_pairs:
link['legit_load'] += model.link_rates[link['source'], link['destination'], s, d].value
return 0
"""Assigns flow rates to the given network using the ILP method.
Uses GLPK as the solver but without relying on Pyomo as the Python interface.
Instead, it generates the model file with all the parameters and directly calls glpsol.
Args:
data: The dictionary containing the topology and the node capacities.
tm: The matrix of generated flow rates.
mipgap: The mipgap argument for the GLPK solver.
Returns:
The objective value, the maximum link load.
"""
def assign_flow_rates_ilp_glpk(data, tm, mipgap):
# Generate model file from template GMPL program:
nb_od_pairs = 1
flow_rates_str = ""
od_pairs_str = ""
for i in range(1, len(nodes) + 1):
for j in range(1, len(nodes) + 1):
if i != j:
flow_rates_str += "\n %d %f" % (nb_od_pairs, tm[i - 1, j - 1])
nb_od_pairs += 1
od_pairs_str += "\n %d %d" % (i, j)
nb_od_pairs -= 1
links_str = ""
link_capacities_str = ""
nb_links = 0
for link in data['links']:
links_str += "\n %d %d" % (link['source'], link['destination'])
link_capacities_str += "\n %d %d %d" % (link['source'], link['destination'], link['capacity'])
nb_links += 1
with open("template.mod", 'r') as fh:
mip_program = fh.read()
mip_program = mip_program.replace("NB_NODES", str(len(data['nodes'])))
mip_program = mip_program.replace("NB_OD_PAIRS", str(nb_od_pairs))
mip_program = mip_program.replace("OD_PAIRS", od_pairs_str)
mip_program = mip_program.replace("LINKS", links_str)
mip_program = mip_program.replace("FLOW_RATES", flow_rates_str)
mip_program = mip_program.replace("LINK_CAPABILITIES", link_capacities_str)
_, tmp_model_file = tempfile.mkstemp()
with open(tmp_model_file, 'w') as fh:
fh.write(mip_program)
# Run GLPK
_, tmp_output_file = tempfile.mkstemp()
call(["glpsol", "--mipgap", str(mipgap), "--model", tmp_model_file, "-o", tmp_output_file])
# Retrieve variables' values:
nb_constraints = 1 + 3 * nb_od_pairs + nb_od_pairs * len(data['nodes']) + nb_links
nb_mapping_vars = nb_od_pairs * nb_od_pairs
nb_pair_rate_vars = nb_od_pairs
link_rates = {}
objective = None
with open(tmp_output_file, 'r') as fh:
regexp = re.compile(r'^\s+\d+\s+link_rates\[(\d+),(\d+),\d+,\d+\]$')
val_line = False
for line in fh:
if val_line:
if (src, dst) in link_rates:
link_rates[src, dst] += float(line.split()[0])
else:
link_rates[src, dst] = float(line.split()[0])
val_line = False
else:
m = regexp.match(line)
if m:
src = int(m.group(1))
dst = int(m.group(2))
val_line = True
elif "Objective: max_load" in line:
objective = float(line.split()[3])
if nb_links != len(link_rates):
print("Error: The number of link_rates variables retrieved from the glpsol output doesn't match the number of links. %d %d", (nb_links, len(link_rates)), file=sys.stderr)
sys.exit(1)
for link in data['links']:
link['legit_load'] = link_rates[link['source'], link['destination']]
os.remove(tmp_model_file)
os.remove(tmp_output_file)
return objective
"""Routes flows between OD pairs on the network according to all_paths.
Args:
links: Dictionary of links, with the legitimate load value indexed under 'legit_load', indexed by link's edges.
all_paths: Dictionary of tuples' lists, indexed by OD pair's edges. Tuples contain a path as a list of nodes to traverse and a weight for that path.
od_pairs: List of OD pairs (as tuples).
flow_rates: List of flow rates. Flow rates i will be routed on OD pair i.
"""
def route_flows_multipaths(links, all_paths, od_pairs, flow_rates):
for i in range(len(od_pairs)):
(src, dst) = od_pairs[i]
paths = all_paths[src, dst]
for (path, weight) in paths:
flow_rate = flow_rates[i] * weight
for j in range(len(path) - 1):
links[path[j], path[j + 1]]['legit_load'] += flow_rate
"""Computes the capacity of a given path.
Args:
links: Dictionary of links, with the legitimate load value indexed under 'capacity', indexed by link's edges.
path: Path, as a list of nodes.
Returns:
The lowest link capacity on the path.
"""
def path_capacity(links, path):
min_cap = float("inf")
for i in range(len(path) - 1):
min_cap = min(min_cap, links[path[i], path[i + 1]]['capacity'])
return min_cap
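# Illustrative note (added, not in the original source): for an assumed path [1, 3, 5],
# path_capacity returns min(links[1, 3]['capacity'], links[3, 5]['capacity']),
# i.e. the bottleneck capacity along the path.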
"""Assigns flow rates to the given network using the heuristic method.
Args:
data: The dictionary containing the topology and the node capacities.
tm: The matrix of generated flow rates.
mipgap: Not used. For compatibility with other methods' functions.
Returns:
The maximum link load.
"""
def assign_flow_rates_heuristic(data, tm, mipgap):
flow_rates = []
od_pairs = []
for i in range(1, len(data['nodes']) + 1):
for j in range(1, len(data['nodes']) + 1):
if i != j:
flow_rates.append(tm[i - 1, j - 1])
od_pairs.append((i, j))
# Compute multipath routing between all node pairs:
links = {}
for link in data['links']:
links[link['source'], link['destination']] = {'legit_load': 0, 'capacity': link['capacity']}
G = nx.DiGraph([(link['source'], link['destination']) for link in data['links']])
all_paths = {}
for (src, dst) in od_pairs:
all_paths[src, dst] = []
try:
paths = [p for p in nx.all_shortest_paths(G, source=src, target=dst)]
except nx.exception.NetworkXNoPath:
print("Error: No path found between %d and %d." % (src, dst), file=sys.stderr)
sys.exit(1)
max_flow = 0
path_capacities = []
for path in paths:
capacity = path_capacity(links, path)
path_capacities.append(capacity)
max_flow += capacity
for i in range(len(paths)):
weight = path_capacities[i] * 1.0 / max_flow
all_paths[src, dst].append((paths[i], weight))
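# Added note (not in the original source): each shortest path is weighted by its
# bottleneck capacity relative to the total; with two equal-capacity shortest paths,
# each gets weight 0.5, so route_flows_multipaths splits the OD flow evenly.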
# Compute sort information on OD pairs:
node_infos = {}
for node in data['nodes']:
node_infos[node['id']] = {'fanout': 0, 'fanin': 0, 'connectivity': 0, 'nb_paths': 0}
for link in data['links']:
node_infos[link['destination']]['connectivity'] += 1
node_infos[link['destination']]['fanin'] += link['capacity']
node_infos[link['source']]['connectivity'] += 1
node_infos[link['source']]['fanout'] += link['capacity']
for (src, dst) in all_paths:
for (path, _) in all_paths[src, dst]:
for node in path[1:-1]:
node_infos[node]['nb_paths'] += 1
od_pair_infos = {}
for (src, dst) in od_pairs:
src_infos = node_infos[src]
dst_infos = node_infos[dst]
m1 = min(src_infos['fanout'], dst_infos['fanin'])
m2 = min(src_infos['connectivity'], dst_infos['connectivity'])
if src_infos['nb_paths'] == dst_infos['nb_paths'] == 0:
m3 = float("Inf")
else:
m3 = 1.0 / max(src_infos['nb_paths'], dst_infos['nb_paths'])
od_pair_infos[(src, dst)] = {'m1': m1, 'm2': m2, 'm3': m3}
# Sort OD pairs:
def make_comparator(od_pair_infos):
def compare(od1, od2):
pair1_infos = od_pair_infos[od1]
pair2_infos = od_pair_infos[od2]
if pair1_infos['m1'] == pair2_infos['m1']:
if pair1_infos['m2'] == pair2_infos['m2']:
if pair1_infos['m3'] == pair2_infos['m3']:
return 0
elif pair1_infos['m3'] > pair2_infos['m3']:
return 1
else:
return -1
elif pair1_infos['m2'] > pair2_infos['m2']:
return 1
else:
return -1
elif pair1_infos['m1'] > pair2_infos['m1']:
return 1
else:
return -1
return compare
from functools import cmp_to_key  # Python 3's sorted() no longer accepts a cmp= argument
flow_rates.sort(reverse=True)
od_pairs = sorted(od_pairs, key=cmp_to_key(make_comparator(od_pair_infos)), reverse=True)
# Route flows between OD pairs:
route_flows_multipaths(links, all_paths, od_pairs, flow_rates)
# Write link loads to YAML and compute objective value:
objective = 0
for link in data['links']:
link['legit_load'] = links[link['source'], link['destination']]['legit_load']
max_link_load = link['legit_load'] / link['capacity']
if max_link_load > objective:
objective = max_link_load
return objective
"""Displays the graph of the network with legitimate loads on edges, in a new window.
Args:
data: The dictionary containing the topology and the node capacities.
"""
def display_graph(data):
links = [(link['source'], link['destination']) for link in data['links']]
edge_labels = {}
for link in data['links']:
if link['legit_load'] > 0:
edge_labels[(link['source'], link['destination'])] = link['legit_load']
G = nx.DiGraph(links)
pos = nx.spring_layout(G)
nx.draw(G, pos=pos, with_labels=True, arrows=False)
nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)
plt.show()
"""Computes the mean load factor (load / capacity) of links in the network.
Args:
data: The dictionary containing the topology and the node capacities.
Returns:
The mean link load factor.
"""
def compute_mean_link_load(data):
tot_link_load = 0
nb_links = 0
for link in data['links']:
tot_link_load += link['legit_load'] / link['capacity']
nb_links += 1
return tot_link_load / nb_links
"""Scales down the legitimate loads of links in the network by factor.
Args:
data: The dictionary containing the topology and the node capacities.
factor: The value link loads are divided by.
"""
def scale_down_tm(data, factor):
for link in data['links']:
link['legit_load'] /= factor
"""Generates node and link items for the output YAML file.
For each link (u, v), a back link (v, u) of equal capacity is created.
Link capacities are all adapted according to the max_link_capacity.
Args:
data: The dictionary containing the output information.
template: The template information, directly from the YAML file (dictionary).
max_node_class: The maximum node class to include in the output network. Used to restrict the network's size.
max_link_capacity: The capacity of the largest links.
Returns:
The list of selected nodes, according to the max_node_class argument.
"""
def generate_topology(data, template, max_node_class, max_link_capacity):
classes = {}
for class_ in template['classes']:
classes[class_['name']] = class_
# Collects neighbors for each node:
all_neighbors = {}
for node in template['nodes']:
neighbors = []
all_neighbors[node['id']] = neighbors
for link in template['links']:
if link['destination'] == node['id']:
neighbors.append((link['source'], link['capacity']))
elif link['source'] == node['id']:
neighbors.append((link['destination'], link['capacity']))
neighbors.sort(key=lambda tup: tup[0])
# Collects links:
links = {}
for link in template['links']:
if link['source'] < link['destination']:
links[(link['source'], link['destination'])] = link['capacity']
else:
links[(link['destination'], link['source'])] = link['capacity']
# Selects the nodes according to the max. node class wanted:
nodes = []
i = 0
for node in template['nodes']:
if node['class'] <= max_node_class:
class_ = classes[node['class']]
new_node = {'id': node['id'], 'cpus': class_['cpus'], 'memory': class_['memory']}
data['nodes'].append(new_node)
nodes.append(node['id'])
else:
# Removed node, need to bridge the neighbors:
nb_neighbors = len(all_neighbors[node['id']])
if nb_neighbors >= 2 and nb_neighbors <= 3:
for i in range(nb_neighbors):
neighbor1 = all_neighbors[node['id']][i]
neighbor2 = all_neighbors[node['id']][(i + 1) % nb_neighbors]
if neighbor1[0] == neighbor2[0]:
# Only allow edges between different nodes.
continue
# Retrieves the max capacity between the already existing link, if any, and the new:
capacity = max(neighbor1[1], neighbor2[1])
if (neighbor1[0], neighbor2[0]) in links:
link2_capacity = links[(neighbor1[0], neighbor2[0])]
if link2_capacity > capacity:
capacity = link2_capacity
else:
continue
link = {'source': neighbor1[0], 'destination': neighbor2[0], 'capacity': capacity}
template['links'].insert(0, link)
# Removes the current node from neighbor lists:
all_neighbors[link['source']] = [(u, cap) for (u, cap) in all_neighbors[link['source']] if u != node['id']]
all_neighbors[link['destination']] = [(u, cap) for (u, cap) in all_neighbors[link['destination']] if u != node['id']]
# Adds the new neighbors:
all_neighbors[link['source']].append((link['destination'], link['capacity']))
all_neighbors[link['destination']].append((link['source'], link['capacity']))
if nb_neighbors == 2:
# If we continue we'll add a back-edge between the two neighbors.
break
i += 1
# Selects the links according to the remaining nodes:
cur_max_link_capacity = max(template['links'], key=lambda link: link['capacity'])['capacity']
link_size_factor = max_link_capacity / cur_max_link_capacity
for link in template['links']:
if link['source'] in nodes and link['destination'] in nodes:
already_added = sum([l['source'] == link['source'] and l['destination'] == link['destination'] for l in data['links']])
if already_added == 0:
link['capacity'] = link['capacity'] * link_size_factor
data['links'].append(link)
back_link = dict(link)
back_link['source'] = link['destination']
back_link['destination'] = link['source']
data['links'].append(back_link)
return nodes
"""Generates the legitimate link loads according to the Gravity model.
Displays a few info messages on stderr.
Args:
data: The dictionary containing the topology and the node capacities.
nodes: The list of node IDs.
mipgap: The mipgap argument for the GLPK solver.
total_traffic: The total traffic going through the network, in Mbps.
target_mean_link_load: The target mean link load factor the loads are scaled towards.
flow_assign_method: The method to use to map generated flow rates into the network. One of 'heuristic', 'ilp', or 'ilp-glpk'.
"""
def generate_link_loads(data, nodes, mipgap, total_traffic, target_mean_link_load, flow_assign_method):
methods = {'heuristic': assign_flow_rates_heuristic, 'ilp': assign_flow_rates_ilp, 'ilp-glpk': assign_flow_rates_ilp_glpk}
tm = generate_tm(len(nodes), total_traffic)
objective = methods[flow_assign_method](data, tm, mipgap)
if objective > 1:
print("Scaling down TM by %f to reach feasible routing." % objective, file=sys.stderr)
scale_down_tm(data, objective)
mean_link_load = compute_mean_link_load(data)
factor = mean_link_load / target_mean_link_load
if factor < 1:
print("Mean link load is at %f." % mean_link_load, file=sys.stderr)
else:
print("Scaling down TM by %f to reach %f mean link load." % (factor, args.mean_link_load), file=sys.stderr)
scale_down_tm(data, factor)
"""Generates attacks between the given attackers and targets.
Attack loads follow an exponential distribution.
Attackers and targets are selected randomly.
Args:
data: The dictionary containing the topology and the node capacities.
nodes: The list of node IDs.
nb_attackers: The number of attackers. 0 for all nodes.
nb_targets: The number of targets.
mean_attack_load: Mean attack load, in Mbps.
"""
def generate_attacks(data, nodes, nb_attackers, nb_targets, mean_attack_load):
random.shuffle(nodes)
if nb_attackers == 0:
attackers = nodes
else:
attackers = nodes[0:nb_attackers]
targets = nodes[0:nb_targets]
for attacker in attackers:
target = targets[random.randint(0, nb_targets - 1)]
load = round(np.random.exponential(mean_attack_load))
attack = {'source': attacker, 'destination': target, 'load': load}
data['attacks'].append(attack)
"""Rounds values for the legitimate loads on links.
Args:
data: The dictionary containing the topology and the node capacities.
"""
def round_link_loads(data):
for link in data['links']:
link['legit_load'] = round(link['legit_load'])
"""Format all output information into text format
Args:
data: The dictionary containing the topology and the node capacities.
Returns:
The text to display or write to file.
"""
def format_text(data):
nb_nodes = len(data['nodes'])
adjacency_matrix = [[0 for x in range(nb_nodes)] for y in range(nb_nodes)]
legit_load_matrix = [[0 for x in range(nb_nodes)] for y in range(nb_nodes)]
for link in data['links']:
adjacency_matrix[link['source'] - 1][link['destination'] - 1] = link['capacity']
legit_load_matrix[link['source'] - 1][link['destination'] - 1] = link['legit_load']
adjacency_matrix_text = "["
legit_load_matrix_text = "["
for i in range(nb_nodes - 1):
adjacency_matrix_text += "%s\n " % json.dumps(adjacency_matrix[i])
legit_load_matrix_text += "%s\n " % json.dumps(legit_load_matrix[i])
adjacency_matrix_text += "%s]" % json.dumps(adjacency_matrix[nb_nodes - 1])
legit_load_matrix_text += "%s]" % json.dumps(legit_load_matrix[nb_nodes - 1])
text = "%s\n\n%s" % (adjacency_matrix_text, legit_load_matrix_text)
resource_matrix = [[0 for x in range(nb_nodes)] for y in range(2)]
for node in data['nodes']:
resource_matrix[0][node['id'] - 1] = node['cpus']
resource_matrix[1][node['id'] - 1] = node['memory']
resource_matrix_text = "[%s,\n" % json.dumps(resource_matrix[0])
resource_matrix_text += " %s]" % json.dumps(resource_matrix[1])
text = "%s\n\n%s" % (text, resource_matrix_text)
nb_attacks = len(data['attacks'])
attack_source_vector = [0 for x in range(nb_attacks)]
attack_dest_vector = [0 for x in range(nb_attacks)]
attack_load_vector = [0 for x in range(nb_attacks)]
i = 0
for attack in data['attacks']:
attack_source_vector[i] = attack['source']
attack_dest_vector[i] = attack['destination']
attack_load_vector[i] = attack['load']
i += 1
text = "%s\n\n%s\n%s\n%s" % (text, json.dumps(attack_source_vector), json.dumps(attack_dest_vector), json.dumps(attack_load_vector))
nb_vnfs = len(data['vnfs'])
vnf_capacity_vector = [0 for x in range(nb_vnfs)]
vnf_cost_vector = [0 for x in range(nb_vnfs)]
vnf_resource_matrix = [[0 for x in range(nb_vnfs)] for y in range(2)]
i = 0
for vnf in data['vnfs']:
vnf_capacity_vector[i] = vnf['capacity']
vnf_cost_vector[i] = vnf['cost']
vnf_resource_matrix[0][i] = vnf['cpus']
vnf_resource_matrix[1][i] = vnf['memory']
i += 1
vnf_resource_matrix_text = "[%s,\n" % json.dumps(resource_matrix[0])
vnf_resource_matrix_text += " %s]" % json.dumps(resource_matrix[1])
vnf_capacity_vector_text = json.dumps(vnf_capacity_vector)
vnf_cost_vector_text = json.dumps(vnf_cost_vector)
text = "%s\n\n%s\n\n%s\n\n%s" % (text, vnf_resource_matrix_text, vnf_capacity_vector_text, vnf_cost_vector_text)
return text
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generate a set of inputs for the VNF placement problem.')
parser.add_argument('template_file',
help='Path to the template file to use.')
parser.add_argument('output_file',
help='Path to the output file to use. - for stdout.')
parser.add_argument('--method-flow-assign', choices=['ilp', 'ilp-glpk', 'heuristic'], default='heuristic',
help='Method for the assignment of flow rates to the network. ilp and ilp-glpk use the'
' same solver, except ilp-glpk is tailored for GLPK and has a lower memory footprint. Defaults to %(default)s.')
parser.add_argument('--max-node-class', type=int, default=3,
help='The maximum class of nodes to select (1, 2, or 3). Defaults to %(default)s to select all nodes from the template.')
parser.add_argument('--max-link-capacity', type=int, default=100000,
help='The capacity assigned to the largest links, in Mbps. Defaults to 100 Gbps.')
parser.add_argument('--total-traffic', type=int, default=1000000,
help='Total traffic going through the network, in Mbps. Used by the gravity model. Defaults to 1 Tbps.')
parser.add_argument('nb_attackers', type=int,
help='Number of nodes from which the attacking flows arrive. 0 for all nodes.')
parser.add_argument('nb_targets', type=int, help='Number of targets for the DDoS attacks.')
parser.add_argument('--mean-attack-load', type=int,
default=10000, help='Mean attack load, in Mbps. Used to generate the attack loads. Defaults to 10 Gbps.')
parser.add_argument('--mipgap', type=float, default=0.2,
help='mipgap parameter for GLPK. Defaults to %(default)s.')
parser.add_argument('--mean-link-load', type=float, default=0.5, help='Mean link load. Defaults to %(default)s.')
parser.add_argument('--display-network', action='store_true', help='Display the network graph with legitimate link loads in a new window.')
parser.add_argument('--yaml', action='store_true', default=False, help='Output a YAML document.')
args = parser.parse_args()
with open(args.template_file, 'r') as fh:
try:
template = yaml.safe_load(fh)
except yaml.YAMLError as e:
print(e, file=sys.stderr)
sys.exit(1)
data = {'nodes': [], 'links': [], 'attacks': [], 'vnfs': template['vnfs']}
nodes = generate_topology(data, template, args.max_node_class, args.max_link_capacity)
if args.nb_attackers > len(nodes):
print("Error: More attackers required than there are nodes (%d) in the network." % len(nodes), file=sys.stderr)
sys.exit(1)
if args.nb_targets > len(nodes):
print("Error: More targets required than there are nodes (%d) in the network." % len(nodes), file=sys.stderr)
sys.exit(1)
generate_link_loads(data, nodes, args.mipgap, args.total_traffic, args.mean_link_load, args.method_flow_assign)
generate_attacks(data, nodes, args.nb_attackers, args.nb_targets, args.mean_attack_load)
round_link_loads(data)
if args.yaml:
if args.output_file == '-':
print(yaml.dump(data, default_flow_style=False))
else:
with open(args.output_file, 'w') as fh:
yaml.dump(data, fh, default_flow_style=False)
else:
text = format_text(data)
if args.output_file == '-':
print(text)
else:
with open(args.output_file, 'w') as fh:
fh.write(text)
if args.display_network:
display_graph(data)
``` |
{
"source": "17edwins17/WaifuBot1796",
"score": 3
} |
#### File: 17edwins17/WaifuBot1796/Main.py
```python
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import time
import random
from random import randint
import Config
import datetime
# Determine the bot's prefix
bot = commands.Bot(command_prefix = Config.PREFIX)
@bot.event
async def on_ready():
print("===================================")
print("Logged in as: %s"%bot.user.name)
print("ID: %s"%bot.user.id)
print('Server count:', str(len(bot.servers)))
print('User Count:',len(set(bot.get_all_members())))
print("Py Lib Version: %s"%discord.__version__)
print("===================================")
@bot.command(pass_context=True)
async def ping(ctx):
"""Check The Bots Response Time"""
t1 = time.perf_counter()
await bot.send_typing(ctx.message.channel)
t2 = time.perf_counter()
thedata = (":ping_pong: **Pong.**\nTime: " + str(round((t2 - t1) * 1000)) + "ms")
color = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
data = discord.Embed(description = thedata, colour=discord.Colour(value = color))
data.set_footer(text="{} | Requested by: {}".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = data)
@bot.command(pass_context=True)
async def serverinfo(ctx):
"""Shows information about the server"""
server = ctx.message.server
online = len([m.status for m in server.members
if m.status == discord.Status.online or
m.status == discord.Status.idle])
total_users = len(server.members)
text_channels = len([x for x in server.channels
if x.type == discord.ChannelType.text])
voice_channels = len(server.channels) - text_channels
passed = (ctx.message.timestamp - server.created_at).days
created_at = ("Since {}. That's over {} days ago!"
"".format(server.created_at.strftime("%d %b %Y %H:%M"), passed))
colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
embed = discord.Embed(description = created_at, colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "Region", value = str(server.region))
embed.add_field(name = "Users Online", value = "{}/{}".format(online, total_users))
embed.add_field(name = "Text Channels", value = text_channels)
embed.add_field(name = "Voice Channels", value = voice_channels)
embed.add_field(name = "Roles", value = len(server.roles))
embed.add_field(name = "Owner", value = str(server.owner))
embed.set_footer(text = "Server ID: " + server.id)
embed.add_field(name = "AFK Timeout", value = "{} minutes".format(server.afk_timeout/60).replace(".0", ""))
embed.add_field(name = "AFK Channel", value = str(server.afk_channel))
embed.add_field(name = "Verification Level", value = str(server.verification_level))
embed.set_footer(text= "{} | Requested by: {}".format(Config.BOTNAME, ctx.message.author))
if server.icon_url:
embed.set_author(name = server.name, url = server.icon_url)
embed.set_thumbnail(url = server.icon_url)
else:
embed.set_author(name=server.name)
await bot.say(embed = embed)
@bot.command(pass_context=True)
async def count(ctx):
"""The amout of users/servers im in"""
users = len(set(bot.get_all_members()))
servers = len(bot.servers)
colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "Servers im in: ", value = servers)
embed.add_field(name = "Users i have: ",value = users)
embed.set_footer(text= "{} | Requested by: {} at".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = embed)
@bot.command(pass_context=True)
async def roleinfo(ctx, *,role: discord.Role = None):
"""Info about a role"""
if role == None:
await bot.say(":x: | Role not found")
else:
colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
colour = int(colour, 16)
embed = discord.Embed(colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "Role Name", value = format(role.name))
embed.add_field(name = "Role ID", value = format(role.id))
embed.add_field(name = "For Server", value = format(role.server))
embed.add_field(name = "Hoist", value = format(role.hoist))
embed.add_field(name = "Role Position", value = format(role.position))
embed.add_field(name = "Mentionable Role", value = format(role.mentionable))
embed.add_field(name = "Role Created At", value = format(role.created_at))
embed.set_footer(text= "{} | Requested by: {} at".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = embed)
@bot.command(pass_context = True)
async def kick(ctx, member : discord.Member = None, *, reason = ""):
'''Kick a user from the server with a reason'''
user_roles = [r.name.lower() for r in ctx.message.author.roles]
if "admin" in user_roles:
if member == None:
await bot.say(":x: | Specify a user to `Kick`")
if reason == "":
await bot.say(":x: | You need a `Reason`")
else:
embed = discord.Embed(description = "{} was kicked.".format(member.name), color = 0xF00000)
embed.add_field(name = "Reason: ", value = reason)
embed.add_field(name = "Moderator:", value=ctx.message.author, inline = True)
embed.set_footer(text = "{} | Kicked by: {}".format(Config.BOTNAME, ctx.message.author))
await bot.kick(member)
await bot.say(embed = embed)
await bot.delete_message(ctx.message)
else:
await bot.say(":x: Your Not Admin!")
@bot.command(pass_context=True)
async def clear(ctx, number):
'''Clears The Chat 2-100'''
user_roles = [r.name.lower() for r in ctx.message.author.roles]
if "admin" in user_roles:
mgs = []
number = int(number)
async for x in bot.logs_from(ctx.message.channel, limit = number):
mgs.append(x)
await bot.delete_messages(mgs)
else:
await bot.say(":x: Your Not Admin!")
@bot.command(pass_context = True)
async def warn(ctx, member: discord.Member = None, *, reason = ""):
'''Warn a user with a reason (Admin Only)'''
user_roles = [r.name.lower() for r in ctx.message.author.roles]
if "admin" in user_roles:
if member == None:
await bot.say(":x: | Please specify a `Member` to `Warn`")
if reason == "":
await bot.say(":x: | You must `Provide` a `Reason`")
else:
color = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
embed = discord.Embed(title = "__**Warning**__", colour=discord.Colour(value=color), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "User: ", value = member, inline = True)
embed.add_field(name="UserID: ", value = member.id, inline = True)
embed.add_field(name="Reason: ", value = reason, inline = True)
embed.add_field(name="Moderator:", value=ctx.message.author, inline=False)
embed.set_footer(text= "{} | Warned by: {}".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = embed)
await bot.delete_message(ctx.message)
else:
await bot.say(":x: Your Not Admin!")
@bot.command(pass_context = True)
async def purge(ctx):
"""Clears the WHOLE channels History! (Admin Only)"""
user_roles = [r.name.lower() for r in ctx.message.author.roles]
if "admin" in user_roles:
await bot.say("Are you sure? This action can't be undone! yes or no?")
response = await bot.wait_for_message(author=ctx.message.author, channel=ctx.message.channel)
response = response.content.lower()
if response == "yes":
await bot.purge_from(ctx.message.channel, limit=99999)
if response =="no":
await bot.say("Purge Canceled")
else:
await bot.say(":x: Your Not Admin")
@bot.command(pass_context = True)
async def ban(ctx, user: discord.Member = None, *,reason = ""):
"""Bans a user from the server!"""
user_roles = [r.name.lower() for r in ctx.message.author.roles]
if "admin" in user_roles:
if user == None:
await bot.say(":x: | Specify a `User` to `Ban`")
if reason == "":
await bot.say(":x: | Missing a `Reason` to `Ban`")
else:
color = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
embed = discord.Embed(title = "__**User Ban**__", colour=discord.Colour(value=color), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "User: ", value = reason)
embed.add_field(name="Moderator:", value=ctx.message.author, inline = True)
embed.set_footer(text= "{} | Banned by: {}".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = embed)
await bot.ban(user)
else:
await bot.say(":x: Your Not Admin")
@bot.command(pass_context = True)
async def botinfo(ctx):
color = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])
color = int(color, 16)
embed = discord.Embed(colour=discord.Colour(value=color), timestamp = datetime.datetime.utcnow())
embed.add_field(name = "Creator: ", value = "Shutdown.py#2406")
embed.add_field(name = "Want A Bot Like Me?", value = "[Click Here To Make Your Own](https://github.com/RageKickedGamer/Basic-Bot-Tutorial)")
embed.set_footer(text= "{} | Requested by: {}".format(Config.BOTNAME, ctx.message.author))
await bot.say(embed = embed)
bot.run(Config.TOKEN)
``` |
{
"source": "17fk/17infoGatheringTools",
"score": 2
} |
#### File: gsil/gsil/engine.py
```python
import re
import socket
import traceback
import requests
from github import Github, GithubException
from bs4 import BeautifulSoup
from gsil.config import Config, public_mail_services, exclude_repository_rules, exclude_codes_rules
from .process import Process, clone
from IPy import IP
from tld import get_tld
from .log import logger
regex_mail = r"([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)"
regex_host = r"@([a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)"
regex_pass = r"(pass|password|<PASSWORD>)"
regex_title = r"<title>(.*)<\/title>"
regex_ip = r"^((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))$"
# Increase the number of single pages to reduce the number of requests
# https://developer.github.com/v3/#pagination
# Number of results per page (this affects how efficiently reports are generated)
per_page = 50
# TODO The number of pre calculated requests according to rule number and number of pages
#
# pages * per_page * rules = requests
# 2 * 30 * 24 = 1440
#
# Default number of pages to scan
default_pages = 4
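# Illustrative estimate (added note, not part of the original module): using the
# formula from the TODO above with the current constants, scanning the example's
# 24 rules would cost roughly
#     default_pages * per_page * rules = 4 * 50 * 24 = 4800
# requests, close to the 5000 requests/hour ceiling noted further down.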
class Engine(object):
def __init__(self, token):
"""
GitHub engine
"""
self.token = token
self.g = Github(login_or_token=token, per_page=per_page)
self.rule_object = None
self.code = ''
# jquery/jquery
self.full_name = ''
self.sha = ''
self.url = ''
# src/attributes/classes.js
self.path = ''
self.result = None
# Results excluded by the filters; to guard against mistakes they are emailed for manual review
self.exclude_result = None
self.hash_list = None
self.processed_count = None
self.next_count = None
def process_pages(self, pages_content, page, total):
for index, content in enumerate(pages_content):
current_i = page * per_page + index
base_info = '[{k}] [{current}/{count}]'.format(k=self.rule_object.keyword, current=current_i, count=total)
# If nothing new has been processed yet and more than three already-processed results have been seen, skip everything that follows
if self.next_count == 0 and self.processed_count > 3:
logger.info('{b} Has encountered {pc} has been processed, skip the current rules!'.format(b=base_info, pc=self.processed_count))
return False
# html_url
self.url = content.html_url
# sha
try:
self.sha = content.sha
except Exception as e:
logger.warning('sha exception {e}'.format(e=e))
self.sha = ''
self.url = ''
if self.sha in self.hash_list:
# pass already processed
logger.info('{b} Processed, skip! ({pc})'.format(b=base_info, pc=self.processed_count))
self.processed_count += 1
continue
# path
self.path = content.path
# full name
self.full_name = content.repository.full_name.strip()
if self._exclude_repository():
# pass exclude repository
logger.info('{b} Excluded because of the path, skip!'.format(b=base_info))
continue
# code
try:
self.code = content.decoded_content.decode('utf-8')
except Exception as e:
logger.warning('Get Content Exception: {e} retrying...'.format(e=e))
continue
match_codes = self.codes()
if len(match_codes) == 0:
logger.info('{b} Did not match the code, skip!'.format(b=base_info))
continue
result = {
'url': self.url,
'match_codes': match_codes,
'hash': self.sha,
'code': self.code,
'repository': self.full_name,
'path': self.path,
}
if self._exclude_codes(match_codes):
logger.info('{b} Code may be useless, do not skip, add to list to be reviewed!'.format(b=base_info))
self.exclude_result[current_i] = result
else:
self.result[current_i] = result
# Clone the code in a separate process
git_url = content.repository.html_url
clone(git_url, self.sha)
logger.info('{b} Processing is complete, the next one!'.format(b=base_info))
self.next_count += 1
return True
def verify(self):
try:
ret = self.g.rate_limiting
return True, 'TOKEN-PASSED: {r}'.format(r=ret)
except GithubException as e:
return False, 'TOKEN-FAILED: {r}'.format(r=e)
def search(self, rule_object):
"""
Search content by rule on GitHub
:param rule_object:
:return: (ret, rule, msg)
"""
self.rule_object = rule_object
# Number of results already processed
self.processed_count = 0
# Number of results processed successfully
self.next_count = 0
# max 5000 requests/H
try:
rate_limiting = self.g.rate_limiting
rate_limiting_reset_time = self.g.rate_limiting_resettime
logger.info('----------------------------')
# RATE_LIMIT_REQUEST: rules * 1
# https://developer.github.com/v3/search/#search-code
ext_query = ''
if self.rule_object.extension is not None:
for ext in self.rule_object.extension.split(','):
ext_query += 'extension:{ext} '.format(ext=ext.strip().lower())
keyword = '{keyword} {ext}'.format(keyword=self.rule_object.keyword, ext=ext_query)
logger.info('Search keyword: {k}'.format(k=keyword))
resource = self.g.search_code(keyword, sort="indexed", order="desc")
except GithubException as e:
msg = 'GitHub [search_code] exception(code: {c} msg: {m} {t}'.format(c=e.status, m=e.data, t=self.token)
logger.critical(msg)
return False, self.rule_object, msg
logger.info('[{k}] Rate limit (remaining / total): {rl} Rate limit reset time: {rlr}'.format(k=self.rule_object.keyword, rl=rate_limiting, rlr=rate_limiting_reset_time))
logger.info('[{k}] Expected number of results: {page}(Pages) * {per}(Per Page) = {total}(Total)'.format(k=self.rule_object.keyword, page=default_pages, per=per_page, total=default_pages * per_page))
# RATE_LIMIT_REQUEST: rules * 1
try:
total = resource.totalCount
logger.info('[{k}] The actual number: {count}'.format(k=self.rule_object.keyword, count=total))
except socket.timeout as e:
return False, self.rule_object, e
except GithubException as e:
msg = 'GitHub [search_code] exception(code: {c} msg: {m} {t}'.format(c=e.status, m=e.data, t=self.token)
logger.critical(msg)
return False, self.rule_object, msg
self.hash_list = Config().hash_list()
if total < per_page:
pages = 1
else:
pages = default_pages
for page in range(pages):
self.result = {}
self.exclude_result = {}
try:
# RATE_LIMIT_REQUEST: pages * rules * 1
pages_content = resource.get_page(page)
except socket.timeout:
logger.info('[{k}] [get_page] Time out, skip to get the next page!'.format(k=self.rule_object.keyword))
continue
except GithubException as e:
msg = 'GitHub [get_page] exception(code: {c} msg: {m} {t}'.format(c=e.status, m=e.data, t=self.token)
logger.critical(msg)
return False, self.rule_object, msg
logger.info('[{k}] Get page {page} data for {count}'.format(k=self.rule_object.keyword, page=page, count=len(pages_content)))
if not self.process_pages(pages_content, page, total):
# If an already-processed result was encountered, skip the whole rule
break
# Send one report per page
Process(self.result, self.rule_object).process()
# TODO: possible false positives are not sent for the time being
# Process(self.exclude_result, self.rule_object).process(True)
logger.info('[{k}] The current rules are processed, the process of normal exit!'.format(k=self.rule_object.keyword))
return True, self.rule_object, len(self.result)
def codes(self):
# Strip image tags so images are not displayed
self.code = self.code.replace('<img', '')
codes = self.code.splitlines()
codes_len = len(codes)
keywords = self._keywords()
match_codes = []
if self.rule_object.mode == 'mail':
return self._mail()
elif self.rule_object.mode == 'only-match':
# only-match mode (only match lines that contain the keyword)
for code in codes:
for kw in keywords:
if kw in code:
match_codes.append(code)
return match_codes
elif self.rule_object.mode == 'normal-match':
# normal-match mode (match lines that contain the keyword plus the 3 lines above and below)
for idx, code in enumerate(codes):
for keyword in keywords:
if keyword in code:
idxs = []
# prev lines
for i in range(-3, 0):
i_idx = idx + i
if i_idx in idxs:
continue
if i_idx < 0:
continue
if codes[i_idx].strip() == '':
continue
logger.debug('P:{x}/{l}: {c}'.format(x=i_idx, l=codes_len, c=codes[i_idx]))
idxs.append(i_idx)
match_codes.append(codes[i_idx])
# current line
if idx not in idxs:
logger.debug('C:{x}/{l}: {c}'.format(x=idx, l=codes_len, c=codes[idx]))
match_codes.append(codes[idx])
# next lines
for i in range(1, 4):
i_idx = idx + i
if i_idx in idxs:
continue
if i_idx >= codes_len:
continue
if codes[i_idx].strip() == '':
continue
logger.debug('N:{x}/{l}: {c}'.format(x=i_idx, l=codes_len, c=codes[i_idx]))
idxs.append(i_idx)
match_codes.append(codes[i_idx])
return match_codes
else:
# Otherwise, match the first 20 lines
return self.code.splitlines()[0:20]
def _keywords(self):
if '"' not in self.rule_object.keyword and ' ' in self.rule_object.keyword:
return self.rule_object.keyword.split(' ')
else:
if '"' in self.rule_object.keyword:
return [self.rule_object.keyword.replace('"', '')]
else:
return [self.rule_object.keyword]
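# Behaviour sketch for _keywords (assumed example keywords, not from the original source):
#   'smtp password'    -> ['smtp', 'password']   (unquoted, space-separated)
#   '"smtp password"'  -> ['smtp password']      (quoted phrase kept whole)
#   'access_key'       -> ['access_key']         (single keyword unchanged)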
def _mail(self):
logger.info('[{k}] mail rule'.format(k=self.rule_object.keyword))
match_codes = []
mails = []
# Find all email addresses
# TODO: the mail account credentials here may be encrypted, in which case the address cannot be extracted
mail_multi = re.findall(regex_mail, self.code)
for mm in mail_multi:
mail = mm.strip().lower()
if mail in mails:
logger.info('[SKIPPED] Mail already processed!')
continue
host = re.findall(regex_host, mail)
host = host[0].strip()
if host in public_mail_services:
logger.info('[SKIPPED] Public mail services!')
continue
mails.append(mail)
# get mail host's title
is_inner_ip = False
if re.match(regex_ip, host) is None:
try:
top_domain = get_tld(host, fix_protocol=True)
except Exception as e:
logger.warning('get top domain exception {msg}'.format(msg=e))
top_domain = host
if top_domain == host:
domain = 'http://www.{host}'.format(host=host)
else:
domain = 'http://{host}'.format(host=host)
else:
if IP(host).iptype() == 'PRIVATE':
is_inner_ip = True
domain = 'http://{host}'.format(host=host)
title = '<Unknown>'
if is_inner_ip is False:
try:
response = requests.get(domain, timeout=4).content
except Exception as e:
title = '<{msg}>'.format(msg=e)
else:
try:
soup = BeautifulSoup(response, "html5lib")
if hasattr(soup.title, 'string'):
title = soup.title.string.strip()[0:150]
except Exception as e:
title = 'Exception'
traceback.print_exc()
else:
title = '<Inner IP>'
match_codes.append("{mail} {domain} {title}".format(mail=mail, domain=domain, title=title))
logger.info(' - {mail} {domain} {title}'.format(mail=mail, domain=domain, title=title))
return match_codes
def _exclude_repository(self):
"""
Exclude some repository(e.g. github.io blog)
:return:
"""
ret = False
# Build the full repository path
full_path = '{repository}/{path}'.format(repository=self.full_name.lower(), path=self.path.lower())
for err in exclude_repository_rules:
if re.search(err, full_path) is not None:
return True
return ret
@staticmethod
def _exclude_codes(codes):
ret = False
for ecr in exclude_codes_rules:
if re.search(ecr, '\n'.join(codes)) is not None:
return True
return ret
``` |
{
"source": "17kisern/-GVSU-CIS457-Project2",
"score": 3
} |
#### File: -GVSU-CIS457-Project2/User/user.py
```python
import os
from os import path
import socket # Import socket module
import asyncio
import sys
"""
Notes
==============
socket.gethostname() gets the current machines hostname, for example "DESKTOP-1337PBJ"
string.encode('UTF-8') encodes the given string into a 'bytes' literal object using the UTF-8 standard that is required
bytes.decode("UTF-8") decodes some 'bytes' literal object using the UTF-8 standard that information gets sent over the internet in
all the b'string here' are converting a string into binary format. Hence the B
"""
connected = False
socketObject = socket.socket() # Create a socket object
responseBuffer = []
bufferSize = 1024
# host = socket.gethostname()
# host = "localhost" # Get local machine name
# port = 60000 # Reserve a port for your service.
def SendPayload(socketBoi, toSend: str):
payload = "".join([toSend, "\0"])
socketBoi.send(payload.encode("UTF-8"))
def RecvPayload(socketBoi):
# If we have shit in our respnse buffer, just use that
if(len(responseBuffer) > 0):
return responseBuffer.pop(0)
global bufferSize
returnString = ""
reachedEOF = False
while not reachedEOF:
# Receiving data in 1 KB chunks
data = socketBoi.recv(bufferSize)
if(not data):
reachedEOF = True
break
# If there was no data in the latest chunk, then break out of our loop
decodedString = data.decode("UTF-8")
if(len(decodedString) >= 2 and decodedString[len(decodedString) - 1: len(decodedString)] == "\0"):
reachedEOF = True
decodedString = decodedString[0:len(decodedString) - 1]
returnString += decodedString
# In case we received multiple responses, split everything on our EOT notifier (NULL \0), and cache into our response buffer
response = returnString.split("\0")
for entry in response:
responseBuffer.append(entry)
# Return the 0th index in the response buffer, and remove it from the response buffer
return responseBuffer.pop(0)
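# Wire-format sketch (an added illustration, not part of the original script): every
# message is a UTF-8 string terminated by a single NUL byte, so
#   SendPayload(sock, "LIST")  transmits  b"LIST\x00"
# and RecvPayload(sock) on the other side returns "LIST", buffering any further
# NUL-separated responses for later calls.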
# Connect to a central server
def Connect(address, port: int, usernameOverride=""):
global connected
global socketObject
global bufferSize
try:
socketObject.connect((address, int(port)))
# data = socketObject.recv(bufferSize)
# connectionStatus = data.decode("UTF-8")
connectionStatus = RecvPayload(socketObject)
# Make sure we were accepted (server hasn't hit limit)
if(int(connectionStatus) != 200):
print("Connection Refused")
raise ConnectionRefusedError
else:
print("Connection Accepted")
print("\nSuccessfully connected to [", address, ":", int(port), "]")
usernameAccepted = False
while(not usernameAccepted):
if(usernameOverride == ""):
username = input("Username: ")
else:
username = usernameOverride
SendPayload(socketObject, username)
response = RecvPayload(socketObject)
if(response == "200"):
usernameAccepted = True
break
else:
print("Username not accepted. Please try another")
hostNameAccepted = False
while(not hostNameAccepted):
hostname = socket.gethostname()
SendPayload(socketObject, hostname)
response = RecvPayload(socketObject)
if(response == "200"):
hostNameAccepted = True
break
connectionSpeedAccepted = False
while(not connectionSpeedAccepted):
connectionSpeed = input("Connection Speed: ")
SendPayload(socketObject, connectionSpeed)
response = RecvPayload(socketObject)
if(response == "200"):
connectionSpeedAccepted = True
break
connected = True
except ConnectionRefusedError:
print("\Server has reached it's user capacity. Please try again later.")
socketObject = socket.socket()
connected = False
except:
print("\nFailed to connect to [", address, ":", int(port), "]\nPlease Try Again")
socketObject = socket.socket()
connected = False
def ConnectGUI(address, port: int, usernameOverride=""):
global connected
if connected:
Disconnect(["connect", address, port])
Connect(address, port, usernameOverride)
if(connected):
RefreshServer()
print("\nReady to interact with Server")
else:
Connect(address, port, usernameOverride)
if(connected):
RefreshServer()
print("\nReady to interact with Server")
# Disconnect from the central server
def Disconnect(commandArgs):
global connected
global socketObject
try:
SendPayload(socketObject, " ".join(commandArgs))
socketObject.close()
socketObject = socket.socket()
print("Successfully disconnected")
connected = False
except:
print("Failed to disconnect! Please try again")
return
# Ask server for available files
def List(commandArgs):
global socketObject
global bufferSize
SendPayload(socketObject, " ".join(commandArgs))
# Receiving List of Strings
listOutput = ""
reachedEOF = False
while not reachedEOF:
# Receiving data in 1 KB chunks
data = RecvPayload(socketObject)
# Check if the data is a signifier of the end of transmission
responseCode = 0
try:
responseCode = int(data)
except:
responseCode = 0
if(not data or data == "" or responseCode == 205):
reachedEOF = True
break
# Not the end of the transmission
listOutput += data
# Send confirmation that we received, back to the server
SendPayload(socketObject, "201")
print(listOutput)
return
def Search(commandArgs):
List(commandArgs)
# Send our available files to the central server
def RefreshServer(commandArgs=[]):
# If this is the initial connection, we don't need to inform the Server we're sending files, as it's already expecting them
if(commandArgs):
SendPayload(socketObject, " ".join(commandArgs))
print("\nPlease give descriptions for all files in the current directory, one file at a time")
# Gather descriptions for each file we have, and tell the server about them
for fileFound in os.listdir("."):
responseCode = 0
# Keep looping as long as the server hasn't confirmed this file
while(responseCode != 201):
# Ask user for file description
descriptionPrompt = ""
if(responseCode == 301):
descriptionPrompt = "".join(["Something went wrong on the server. Please try again.\n", "Description [", fileFound, "]: "])
else:
descriptionPrompt = "".join(["Description [", fileFound, "]: "])
fileDescription = input(descriptionPrompt)
payload = "|".join([fileFound, fileDescription])
# Send that info to the server
SendPayload(socketObject, payload)
# Wait for servers acceptance code (success or failure)
response = RecvPayload(socketObject)
try:
responseCode = int(response)
except:
print("Errored out with response/Code:", response)
# Tell the server we're done
SendPayload(socketObject, "205")
# Ask server to retrieve a requested file
def Retrieve(commandArgs):
global socketObject
global bufferSize
SendPayload(socketObject, " ".join(commandArgs))
# First listen for status code
statusCode = "300"
statusCode = RecvPayload(socketObject)
if(int(statusCode) == 300):
print("File does not exist")
return
if(int(statusCode) != 200):
print("Error in downloading file")
return
# Prepping a fileStream for us to write into
try:
receivedFile = open(commandArgs[1], 'wb')
except:
print("Error in downloading file")
return
# Reading the file in from the server
reachedEOF = False
while not reachedEOF:
print('Downloading file from server...')
# Receiving data in 1 KB chunks
data = socketObject.recv(bufferSize)
if(not data):
reachedEOF = True
break
# If there was no data in the latest chunk, then break out of our loop
decodedString = data.decode("UTF-8")
if(len(decodedString) >= 2 and decodedString[len(decodedString) - 1: len(decodedString)] == "\0"):
reachedEOF = True
decodedString = decodedString[0: len(decodedString) - 1]
# Drop the NUL terminator so it is not written into the downloaded file
data = data[:-1]
# Write data to a file
receivedFile.write(data)
receivedFile.close()
print("Successfully downloaded and saved: ", commandArgs[1])
return
# Send a requested file
def Store(commandArgs):
global socketObject
global bufferSize
# Sending status code for if the file exists
fileName = commandArgs[1]
try:
fileItself = open(fileName, "rb")
except:
print("Failed to open file: ", fileName)
return
# command = " "
# socketObject.send(command.join(commandArgs).encode("UTF-8"))
SendPayload(socketObject, " ".join(commandArgs))
# Breaking the file down into smaller data chunks
fileInBytes = fileItself.read(bufferSize)
while fileInBytes:
socketObject.send(fileInBytes)
# Reading in the next chunk of data
fileInBytes = fileItself.read(bufferSize)
fileItself.close()
print("Sent: ", commandArgs[1])
# Let the client know we're done sending the file
SendPayload(socketObject, "205")
return
# Shutdown the server
def Shutdown_Server(commandArgs):
global socketObject
SendPayload(socketObject, " ".join(commandArgs))
return
def Main():
global connected
print("Would you like to operate with command line or GUI?")
print(" - [0] Command Line")
print(" - [1] GUI")
userResponse = input("Interface: ")
if(userResponse == "0"):
print("\nYou have selected Command Line")
else:
print("\nLaunching GUI")
print("\nYou must first connect to a server before issuing any commands.")
while userResponse == "0":
print("\n-----------------------------\n")
userInput = input("Enter Command: ")
commandArgs = userInput.split()
commandGiven = commandArgs[0]
if(commandGiven.upper() == "CONNECT" and len(commandArgs) == 3):
if connected:
Disconnect(commandArgs)
Connect(commandArgs[1], commandArgs[2])
if(connected):
RefreshServer()
print("\nReady to interact with Server")
else:
Connect(commandArgs[1], commandArgs[2])
if(connected):
RefreshServer()
print("\nReady to interact with Server")
continue
else:
if not connected:
print("You must first connect to a server before issuing any commands.")
continue
if(commandGiven.upper() == "REFRESH_USER_FILES" and len(commandArgs) == 1):
RefreshServer(commandArgs)
continue
elif(commandGiven.upper() == "LIST" and len(commandArgs) == 1):
List(commandArgs)
continue
elif(commandGiven.upper() == "SEARCH" and len(commandArgs) == 2):
List(commandArgs)
continue
elif(commandGiven.upper() == "RETRIEVE" and len(commandArgs) == 2):
Retrieve(commandArgs)
continue
elif(commandGiven.upper() == "STORE" and len(commandArgs) == 2):
Store(commandArgs)
continue
elif(commandGiven.upper() == "DISCONNECT" and len(commandArgs) == 1):
Disconnect(commandArgs)
continue
elif(commandGiven.upper() == "QUIT" and len(commandArgs) == 1):
Disconnect(commandArgs)
break
elif(commandGiven.upper() == "SHUTDOWN_SERVER" and len(commandArgs) == 1):
Disconnect(commandArgs)
break
else:
print("Invalid Command. Please try again.")
continue
Main()
``` |
{
"source": "17LangF/virtual-cube",
"score": 4
} |
#### File: virtual-cube/cube/functions.py
```python
def convert_seconds(seconds: float) -> str:
"""
Convert time in seconds to days:hours:minutes:seconds.milliseconds
with leading 0s removed.
Parameters
----------
seconds : float
Number of seconds to be converted.
Returns
-------
str
Converted time.
"""
mS = int(seconds * 1000)
D, mS = divmod(mS, 86400000)
H, mS = divmod(mS, 3600000)
M, mS = divmod(mS, 60000)
S, mS = divmod(mS, 1000)
H = str(H).zfill(2)
M = str(M).zfill(2)
S = str(S).zfill(2)
mS = str(mS).zfill(3)
time = f'{D}:{H}:{M}:{S}.{mS}'.lstrip('0:')
if time.startswith('.'):
time = '0' + time
return time
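# Doctest-style examples of the formatting above (assumed inputs, not part of the original module):
#   convert_seconds(3661.5) -> '1:01:01.500'
#   convert_seconds(0.25)   -> '0.250'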
def issolved(cube: list) -> bool:
"""
Return True if the cube is solved, False otherwise.
A cube is a solved cube if every sticker of each side of the cube is
the same.
"""
return all(all(x == s[0][0] for y in s for x in y) for s in cube)
def orient(cube: list):
"""
Return list of moves which orient the cube, False if not possible.
For odd layered cubes, a cube is oriented if the white (U) centre is
facing up, and the green (F) centre is facing the front.
For even layered cubes, a cube is oriented if the yellow-blue-orange
(DBL) corner is in the down-back-left (DBL) position correctly
oriented.
"""
size = len(cube[0])
if size % 2:
mid = size // 2
centres = [side[mid][mid] for side in cube]
if {'U', 'F'}.issubset(centres):
u = centres.index('U')
f = centres.index('F')
else:
return False
else:
vertices = (
((0,0,0), (4,0,-1), (1,0,0)),
((0,0,-1), (3,0,-1), (4,0,0)),
((0,-1,-1), (2,0,-1), (3,0,0)),
((0,-1,0), (1,0,-1), (2,0,0)),
((5,0,0), (2,-1,0), (1,-1,-1)),
((5,0,-1), (3,-1,0), (2,-1,-1)),
((5,-1,-1), (4,-1,0), (3,-1,-1)),
((5,-1,0), (1,-1,0), (4,-1,-1))
)
for v in vertices:
vertex = [cube[v[i][0]][v[i][1]][v[i][2]] for i in range(3)]
if set(vertex) == set('DBL'):
u = v[vertex.index('D')][0]
f = v[vertex.index('B')][0]
if not u:
u = 5
elif u == 5:
u = 0
else:
u = (u + 1) % 4 + 1
if not f:
f = 5
elif f == 5:
f = 0
else:
f = (f + 1) % 4 + 1
orientations = (
(False, ["y'"], [], ['y'], ['y2'], False),
(['z','y'], False, ['z'], False, ['z','y2'], ['z',"y'"]),
(['x','y2'], ['x',"y'"], False, ['x','y'], False, ['x']),
(["z'","y'"], False, ["z'"], False, ["z'",'y2'], ["z'",'y']),
(["x'"], ["x'","y'"], False, ["x'",'y'], False, ["x'",'y2']),
(False, ['x2',"y'"], ['z2'], ['x2','y'], ['x2'], False)
)
return orientations[u][f]
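# Added example (not in the original source): for a solved odd-layered cube held in
# the standard orientation, the white (U) centre is on side 0 and the green (F)
# centre on side 2, so the lookup returns orientations[0][2] == [] (no moves needed).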
def parse_moves(moves: str) -> list:
"""
Interpret moves with brackets.
Grouping: using round brackets, e.g., (R U R' U').
Repetition: using a number after a bracket, e.g., (R U R' U')3.
Reversing: using an apostrophe after a bracket, e.g., (R U R' U')'.
Commutators: using square brackets with a comma, e.g., [R, U].
Conjugates: using square brackets with a colon, e.g., [R: U].
Returns
-------
list of str
Moves interpreted.
Raises
------
MoveError
If the moves could not be interpreted.
"""
from cube import MoveError
# Split joined moves
i = 0
previous_move = False
special_chars = set(' ()[],:')
move_chars = set('ULFRBDMESXYZ')
while i < len(moves):
if moves[i] in special_chars:
previous_move = False
elif moves[i].upper() in move_chars:
if previous_move:
moves = f'{moves[:i]} {moves[i:]}'
i += 2
continue
else:
previous_move = True
i += 1
# Bracketed moves
while any(bracket in moves for bracket in '()[]'):
brackets = []
for i, char in enumerate(moves):
if char in {'(', '['}:
brackets.append((char, i))
elif char in {')', ']'}:
if (not len(brackets) or
brackets[-1][0] != '(['[')]'.index(char)]):
raise MoveError("Brackets are not balanced.")
bracket = moves[brackets[-1][1]+1: i]
if char == ']':
for separator in {',', ':'}:
if bracket.count(separator) == 1:
a, b = bracket.split(separator)
a = a.strip()
b = b.strip()
if not (a and b):
raise MoveError(
"There must be at least one move on both "
"sides of the separator.")
break
else:
raise MoveError(
"Square brackets must contain one comma or one "
"colon.")
# Commutator
if separator == ',':
reverse_a = ' '.join(reverse(a.split()))
reverse_b = ' '.join(reverse(b.split()))
bracket = f'{a} {b} {reverse_a} {reverse_b}'
# Conjugate
else:
reverse_a = ' '.join(reverse(a.split()))
bracket = f'{a} {b} {reverse_a}'
# Repetition
if i < len(moves) - 1:
for end, char in enumerate(moves[i+1:], i+1):
if not char.isdecimal():
break
else:
end += 1
if end > i + 1:
bracket = ' '.join([bracket] * int(moves[i + 1: end]))
if end < len(moves):
if char == "'":
bracket = ' '.join(reverse(bracket.split()))
end += 1
else:
end = len(moves)
moves = moves[:brackets[-1][1]], bracket, moves[end:]
moves = ' '.join(moves).strip()
break
else:
if len(brackets) != 0:
raise MoveError("Brackets are not balanced.")
return moves.split()
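# Examples of the notation handled above (assumed inputs, consistent with the docstring;
# not part of the original module):
#   parse_moves("(R U R' U')2") -> ['R', 'U', "R'", "U'", 'R', 'U', "R'", "U'"]
#   parse_moves("[R, U]")       -> ['R', 'U', "R'", "U'"]
#   parse_moves("[R: U]")       -> ['R', 'U', "R'"]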
def reverse(moves: list) -> list:
"""Return list of reversed moves."""
reverse = []
for move in moves[::-1]:
if move.endswith("'"):
reverse.append(move[:-1])
elif move.endswith('2'):
reverse.append(move)
else:
reverse.append(move + "'")
return reverse
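# Added example (assumed input, not part of the original module):
#   reverse(['R', 'U2', "F'"]) -> ['F', 'U2', "R'"]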
def split_move(move: str, size: int) -> tuple:
"""
Split move into depth, face, turns.
Parameters
----------
move : str
Move to split.
size : int
Number of cubies on each edge of the cube.
Returns
-------
tuple of (list of [int, int], str, int)
A tuple containing:
depth : list of [int, int]
Two values marking the start and end of the depth of the
move. The number of layers being turned is given by
`depth[1] - depth[0]`.
face : {'U', 'L', 'F', 'R', 'B', 'D'}
Centre the turn is rotated about.
turns : int
Number of 90 degree rotations of the turn, postive for
clockwise turns, negative anticlockwise turns.
Raises
------
MoveError
If the move is invalid.
"""
from cube import MoveError
start_move = move
# Calculate depth of move
# Rotations
if move.startswith(('x', 'y', 'z')):
depth = [0, size]
if move.startswith('x'):
move = 'R' + move[1:]
elif move.startswith('y'):
move = 'U' + move[1:]
else:
move = 'F' + move[1:]
# Slice moves
elif move.upper().startswith(('M', 'E', 'S')):
if move[0].isupper():
depth = [size // 2, int(size/2 + 0.5)]
else:
depth = [1, size-1]
if move.upper().startswith('M'):
move = 'L' + move[1:]
elif move.upper().startswith('E'):
move = 'D' + move[1:]
else:
move = 'F' + move[1:]
# Other moves
elif move[0].isdecimal():
for i, char in enumerate(move):
if not char.isdecimal():
break
else:
i += 1
depth = [0, int(move[:i])]
move = move[i:]
if not move:
raise MoveError(f'"{start_move}" is invalid.')
if len(move) == 1:
# e.g., 2U
if move[0].isupper():
depth[0] = depth[1] - 1
# e.g., 2u
else:
move = move.capitalize()
else:
if move.startswith('-'):
depth[0] = depth[1] - 1
move = move[1:]
if not move[0].isdecimal():
raise MoveError(f'"{start_move}" is invalid.')
for i, char in enumerate(move):
if not char.isdecimal():
break
else:
i += 1
depth[1] = int(move[:i])
move = move[i:]
if not move:
raise MoveError(f'"{start_move}" is invalid.')
# e.g., 2-3Uw
if move[0].isupper():
if move[1:2] == 'w':
move = move[0] + move[2:]
else:
raise MoveError(f'"{start_move}" is invalid.')
# e.g., 2-3u
else:
move = move.capitalize()
elif move[0].isupper():
# e.g., 2Uw
if move[1] == 'w':
move = move[0] + move[2:]
# e.g., 2U'
else:
depth[0] = depth[1] - 1
# e.g., 2u'
else:
move = move.capitalize()
# e.g., u
elif move[0].islower():
depth = [0, 2]
move = move.capitalize()
else:
if len(move) > 1:
# e.g., Uw
if move[1] == 'w':
depth = [0, 2]
move = move[0] + move[2:]
# e.g., U'
else:
depth = [0, 1]
# e.g., U
else:
depth = [0, 1]
for i in range(2):
if depth[i] > size:
depth[i] = size
elif depth[i] < 0:
depth[i] = 0
if depth[1] < depth[0]:
depth[1] = depth[0]
# Calculate number of 90 degree turns
if len(move) > 1:
# e.g., U2
if move[1:].isdecimal():
turns = int(move[1:])
# e.g., U'
elif len(move) == 2 and move[1] == "'":
turns = -1
# e.g., U2'
elif move[1:-1].isdecimal() and move[-1] == "'":
turns = -int(move[1:-1])
else:
raise MoveError(f'"{start_move}" is invalid.')
# e.g., U
else:
turns = 1
face = move[0]
if face not in {'U', 'L', 'F', 'R', 'B', 'D'}:
raise MoveError(f'"{start_move}" is invalid.')
return depth, face, turns
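# Worked examples of the (depth, face, turns) decomposition above (assumed inputs,
# not part of the original module):
#   split_move("R", 3)    -> ([0, 1], 'R', 1)    outer layer, one clockwise turn
#   split_move("3Rw2", 5) -> ([0, 3], 'R', 2)    three layers deep, half turn
#   split_move("M'", 3)   -> ([1, 2], 'L', -1)   middle slice, one anticlockwise turn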
```
#### File: cube/_solve/_cage.py
```python
from cube.functions import orient
def edges(self) -> tuple:
"""
Solve edges of a big cube.
Returns
-------
tuple of (list of str, dict of {'EDGES': int})
Moves to solve edges, statistics (move count in ETM).
Notes
-----
The edge in the middle of the FR slot is chosen to be solved. Edges
with the same two coloured stickers are inserted into the E slice
and solved into the FR slot in the correct orientation by using `U`
slice moves and flipping the edge with `L2` or `B2` if needed.
If an edge is incorrectly oriented, use the flipping algorithm `R U
R' F R' F' R` to flip the edges in the FR slot.
The edge is then moved to the U or D layer.
Repeat this until 9 edges are solved.
The last 3 edges are solved by inserting some edges using `U` slice
moves followed by the flipping algorithm, then reversing the slice
moves.
If there are flipped edges in the last edge, rotate the edge to the
UF position then use the parity algorithm `R' U2 L F2 L' F2 R2 U2 R
U2 R' U2 F2 R2 F2` replacing `R` and `L` moves with the slices which
the flipped edges are located.
"""
cube = self.cube
size = self.size
solve = []
mid = size // 2
edges = {
'FL': ((2,'+',0), (1,'+',-1), 'FL'),
'BL': ((4,'+',-1), (1,'+',0), 'BL'),
'BR': ((4,'+',0), (3,'+',-1), 'BR'),
'UF': ((0,-1,'+'), (2,0,'+'), "L' U L"),
'UR': ((0,'+',-1), (3,0,'-'), "F U F'"),
'UL': ((0,'+',0), (1,0,'+'), "F U' F'"),
'UB': ((0,0,'+'), (4,0,'-'), "L' U' L"),
'DF': ((5,0,'+'), (2,-1,'+'), "L D' L'"),
'DR': ((5,'+',-1), (3,-1,'+'), "F' D' F"),
'DL': ((5,'+',0), (1,-1,'-'), "F' D F"),
'DB': ((5,-1,'+'), (4,-1,'-'), "L D L'"),
'FR': ((2,'+',-1), (3,'+',0), 'FR')
}
flip = "R U R' F R' F' R".split()
# First 9 edges
for _ in range((size - 2) * 9):
f, r = cube[2][mid][-1], cube[3][mid][0]
fr = tuple([cube[2][y][-1] for y in range(1, size-1)])
rf = tuple([cube[3][y][0] for y in range(1, size-1)])
# If FR edge is solved
if fr.count(f) == rf.count(r) == size - 2:
for edge in edges.values():
if 0 < edge[0][0] < 5:
continue
a, b = edge[:2]
if isinstance(a[1], int):
a = tuple([cube[a[0]][a[1]][y] for y in range(1, size-1)])
else:
a = tuple([cube[a[0]][y][a[2]] for y in range(1, size-1)])
if isinstance(b[1], int):
b = tuple([cube[b[0]][b[1]][y] for y in range(1, size-1)])
else:
b = tuple([cube[b[0]][y][b[2]] for y in range(1, size-1)])
if a.count(a[0]) == b.count(b[0]) == size - 2:
continue
moves = edge[2]
if 'F' in moves:
moves = moves.replace('F', "F'").replace("''", '')
else:
moves = moves.replace('L', "R'").replace('U', "U'")
moves = moves.replace('D', "D'").replace("''", '')
moves = moves.split()
self.move(moves)
solve.extend(moves)
break
else:
# No empty slots on U or D layer
break
continue
# Find unsolved edge
for edge in edges.values():
for i in range(1, size-1):
sticker1 = list(edge[0])
if '+' in sticker1:
sticker1[sticker1.index('+')] = i
else:
sticker1[sticker1.index('-')] = size - i - 1
sticker1 = cube[sticker1[0]][sticker1[1]][sticker1[2]]
sticker2 = list(edge[1])
if '+' in sticker2:
sticker2[sticker2.index('+')] = i
else:
sticker2[sticker2.index('-')] = size - i - 1
sticker2 = cube[sticker2[0]][sticker2[1]][sticker2[2]]
if {sticker1, sticker2} == {f, r}:
break
else:
continue
break
# Do moves
if len(edge[2]) != 2:
moves = edge[2].split()
self.move(moves)
solve.extend(moves)
edge = 'FL'
else:
edge = edge[2]
if edge == 'BR':
for y in range(1, size-1):
if cube[3][y][-1] == f and cube[4][y][0] == r:
move = f'{y+1}U'
self.move(move)
solve.append(move)
for y in range(1, size-1):
if cube[3][y][-1] == r and cube[4][y][0] == f:
self.move('B2')
solve.append('B2')
edge = 'BL'
if edge == 'BL':
for y in range(1, size-1):
if cube[4][y][-1] == f and cube[1][y][0] == r:
move = f'{y+1}U2'
self.move(move)
solve.append(move)
for y in range(1, size-1):
if cube[4][y][-1] == r and cube[1][y][0] == f:
self.move('L2')
solve.append('L2')
edge = 'FL'
if edge == 'FL':
for y in range(1, size-1):
if cube[1][y][-1] == f and cube[2][y][0] == r:
move = f"{y+1}U'"
self.move(move)
solve.append(move)
for y in range(1, size-1):
if cube[1][y][-1] == r and cube[2][y][0] == f:
self.move('L2')
solve.append('L2')
for y in range(1, size-1):
if cube[4][y][-1] == f and cube[1][y][0] == r:
move = f'{y+1}U2'
self.move(move)
solve.append(move)
# Flipped edge
if edge == 'FR':
depths = []
for y in range(1, size-1):
if cube[2][y][-1] != f:
depths.append(y)
for y in depths:
move = f'{y+1}U'
self.move(move)
solve.append(move)
self.move(flip)
solve.extend(flip)
for y in depths:
move = f"{y+1}U'"
self.move(move)
solve.append(move)
# Last 3 edges
self.move('y')
solve.append('y')
edges = ((3,'+',-1), (4,'+',0), 1), ((4,'+',-1), (1,'+',0), 2)
for solving_edge in range(2):
if solving_edge == 1:
edges = edges[:1]
f, r = cube[2][mid][-1], cube[3][mid][0]
for edge in edges:
# Do moves
depths = [[], []]
for y in range(1, size-1):
sticker1 = cube[edge[0][0]][y][edge[0][2]]
sticker2 = cube[edge[1][0]][y][edge[1][2]]
if {sticker1, sticker2} == {f, r}:
if {cube[2][-y-1][-1], cube[3][-y-1][0]} == {f, r}:
depths[1].append(y)
elif size-y-1 in depths[0]:
depths[1].append(y)
else:
depths[0].append(y)
for i in range(2):
if depths[i]:
for y in depths[i]:
if edge[2] == 1:
move = f'{y+1}U'
else:
move = f'{y+1}U2'
self.move(move)
solve.append(move)
self.move(flip)
solve.extend(flip)
for y in depths[i]:
if edge[2] == 1:
move = f"{y+1}U'"
else:
move = f'{y+1}U2'
self.move(move)
solve.append(move)
elif depths[1]:
self.move(flip)
solve.extend(flip)
# Flipped edge
depths = [y for y in range(1, size-1) if cube[2][y][-1] == f]
if len(depths) > size / 2 - 1:
depths = set(range(1, size-1)) - set(depths)
if depths:
for y in depths:
move = f'{y+1}U'
self.move(move)
solve.append(move)
self.move(flip)
solve.extend(flip)
for y in depths:
move = f"{y+1}U'"
self.move(move)
solve.append(move)
self.move('y')
solve.append('y')
# Parity
parity = "R' U2 L F2 L' F2 R2 U2 R U2 R' U2 F2 R2 F2"
f = cube[2][mid][-1]
depths = [y for y in range(1, size//2) if cube[2][y][-1] != f]
if len(depths) > size / 2 - 1 and not size % 2:
depths = set(range(1, size-1)) - set(depths)
if depths:
self.move("z'")
solve.append("z'")
for move in 'R', 'R2', "R'", 'L', "L'":
moves = ''.join(f'{depth+1}{move} ' for depth in depths)
parity = parity.replace(f'{move} ', moves)
parity = parity.split()
self.move(parity)
solve.extend(parity)
return solve, {'EDGES': len(solve)}
def centres(self) -> tuple:
"""
Solve centres of a big cube while preserving edges.
Returns
-------
tuple of (list of str, dict of {'CENTRES': int})
Moves to solve centres, statistics (move count in ETM).
Notes
-----
For odd layered cubes, the cube is oriented so that the white centre
is on top and the green centre is on the front.
Find the first white centre on the front face, searching left to
right, top to bottom. If the corresponding location on the top face
already has a white centre, turn the top layer so that the
corresponding location is unsolved.
Insert the centre onto the top face using Niklas commutators such as
`[xR, U' yL' U]`, `[xR, U yL' U']` or mirrored left to right, where
xR and yL are the slices which turn the white centre to be inserted.
Repeat until there are no more white centres on the front. Rotate
the cube with `y` moves so that white centres are available to be
inserted from the front, and continue inserting white centres onto
the top face.
If the only remaining white centres are on the bottom face, use
Niklas commutators but with 180 degree slice moves to insert the
rest of the white centres.
Then turn the cube over with `z2` and solve the yellow centre with
the same method. Then rotate the cube with `z'` so that the white
centre is on the right-hand side. For odd layered cubes, the orange
centre is rotated to the top face. Solve the orange centre, then do
`x`, solve the green centre, do `x`, then finally solve the red
centre, which also completes the blue centre.
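Examples
--------
Illustrative only; the actual moves and counts depend on the scramble.
Assumes the solver is used through the `solve` dispatcher defined in
`_solve/__init__.py`:

    moves, stats = cube.solve('CENTRES')
    stats['U CENTRE']  # e.g. 25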
"""
cube = self.cube
size = self.size
stats = {}
mid = size // 2
if size % 2:
solve = orient(cube)
self.move(solve)
else:
solve = []
for side in 'UDLFR':
count = sum(row[1:-1].count(side) for row in cube[0][1:-1])
for _ in range((size - 2) ** 2 - count):
for s in 2, 1, 3, 4, 5:
if any(side in row[1:-1] for row in cube[s][1:-1]):
break
if s == 1:
self.move("y'")
solve.append("y'")
s = 2
elif s == 3:
self.move('y')
solve.append('y')
s = 2
elif s == 4:
self.move('y2')
solve.append('y2')
s = 2
for y in range(1, size-1):
for x in range(1, size-1):
if cube[s][y][x] == side:
if cube[0][y][x] != side:
setup = ''
elif cube[0][-x-1][y] != side:
setup = 'U'
elif cube[0][x][-y-1] != side:
setup = "U'"
else:
setup = 'U2'
if setup:
self.move(setup)
solve.append(setup)
# Uncomment code to preserve the position of edges.
if y < mid:
y = str(y + 1)
if x < mid:
x = str(x + 1)
moves = (x + "L'", 'U', y + 'R', "U'",
x + 'L', 'U', y + "R'") # , "U'")
else:
x = str(size - x)
moves = (x + 'R', "U'", y + "L'", 'U',
x + "R'", "U'", y + 'L') # , 'U')
else:
y = str(size - y)
if x < mid:
x = str(x + 1)
moves = (x + "L'", "U'", y + 'R', 'U',
x + 'L', "U'", y + "R'") # , 'U')
else:
x = str(size - x)
moves = (x + 'R', 'U', y + "L'", "U'",
x + "R'", 'U', y + 'L') # , "U'")
if s == 5:
moves = ' '.join(moves)
moves = moves.replace('L', 'L2')
moves = moves.replace('R', 'R2')
moves = moves.replace("2'", '2')
moves = moves.split()
self.move(moves)
solve.extend(moves)
# if setup == 'U':
# setup = "U'"
# elif setup == "U'":
# setup = 'U'
# if setup:
# self.move(setup)
# solve.append(setup)
break
else:
continue
break
stats[f'{side} CENTRE'] = len(solve) - sum(stats.values())
if side == 'U':
self.move('z2')
solve.append('z2')
elif side == 'D':
self.move("z'")
solve.append("z'")
if size % 2:
if cube[0][mid][mid] == 'B':
self.move('x')
solve.append('x')
elif cube[0][mid][mid] == 'F':
self.move("x'")
solve.append("x'")
elif cube[0][mid][mid] == 'R':
self.move('x2')
solve.append('x2')
elif side in {'L', 'F'}:
if cube[1][1][1] == 'U':
self.move('y2')
solve.append('y2')
self.move('x')
solve.append('x')
return solve, stats
```
#### File: cube/_solve/__init__.py
```python
from timeit import default_timer
from ._beginners import b_cross, layer1, layer2, eo, co, cp, ep
from ._cage import edges, centres
from ._cfop import cfop, cross, f2l, oll, pll
from ._old_pochmann import op, op_corners, op_edges
from ._optimal import optimal_2x2, optimal_nxn
from ._thistlethwaite import g1, g2, g3, g4
def solve(self, *method: str, cubes: int = 1) -> tuple:
"""
Compute a solution to solve the cube using a given method.
Parameters
----------
*method : str
Method used to solve. If the last argument is an integer or
numeric string, the integer value is assigned to `cubes`, and
the cube will be scrambled and solved that many times.
cubes : int, default=1
Number of cubes to solve.
Returns
-------
tuple
If solving only the current cube, the tuple contains:
moves : list of str
Moves to solve the cube.
stats : dict
Statistics of the solve.
Otherwise, the tuple contains:
cubes : int
Number of cubes scrambled and solved.
stats : tuple of (list of dict, float)
Statistics of each cube, solve time in seconds.
Statistics include:
MOVECOUNT: move count of solve in HTM, QTM, STM, and ETM.
TIME: time the program took to find a solution.
The move count in ETM for each substep in the method.
For Old Pochmann, the piece swaps are described using the
Speffz letter scheme.
Raises
------
CubeError
If any cube was not solvable.
Notes
-----
Methods for different cube sizes:
2x2 - OPTIMAL
Solve cube in the fewest moves (HTM). Solutions are 3-gen, so
only use U, F, and R moves. Note: the first solve will be
significantly slower than average for the program to read the
file.
2x2 - OP or OLD POCHMANN
Solve cube using setup moves and swaps. Corners are solved using
an altered Y-permutation with buffer at A.
3x3 - CFOP or FRIDRICH
Solve cube using the CFOP or Fridrich method. The cross is
solved on the bottom face.
3x3 - BEGINNERS
Solve cube using a beginner's method.
3x3 - OP or OLD POCHMANN
Solve cube using setup moves and swaps. Edges are solved using
the T-permutation with buffer at B. Parity is solved using the
Ra-permutation. Corners are solved using an altered
Y-permutation with buffer at A.
3x3 - THISTLETHWAITE
Solve cube using Thistlethwaite's algorithm.
3x3 - OPTIMAL
Solve cube in as few moves as possible. The more moves required
to solve, the longer it will take to find a solution.
3x3 Substeps:
CROSS: cross
FIRST LAYER: cross, first layer corners
F2L: cross, F2L
OLL: cross, F2L, OLL
CORNERS: Old Pochmann corners
EDGES: Old Pochmann edges
EDGE ORIENTATION: g1
DOMINO REDUCTION: g1, g2
HALF TURN REDUCTION: g1, g2, g3
4x4+ - CAGE
Solve cube by solving edges first, using commutators to solve
centres, and then solving the cube as a 3x3 using CFOP.
4x4+ - OPTIMAL
Solve cube in as few moves as possible. The more moves required
to solve, the longer it will take to find a solution.
4x4+ Substeps:
EDGES: edges
CENTRES: centres preserving edges
REDUCTION: edges, centres
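Examples
--------
Illustrative only; moves and statistics depend on the scramble, and
`cube` is assumed to be a Cube instance exposing this function as a
method:

    moves, stats = cube.solve('CFOP')             # solve the current cube
    cubes, (stats, time) = cube.solve('CFOP', 5)  # scramble and solve 5 cubes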
"""
from cube import CubeError
size = self.size
methods = {
2: {
'OPTIMAL': optimal_2x2,
'OP': op_corners,
'OLD POCHMANN': op_corners,
},
3: {
'CFOP': (cross, f2l, oll, pll),
'FRIDRICH': (cross, f2l, oll, pll),
'BEGINNERS': (b_cross, layer1, layer2, eo, co, cp, ep),
'OP': op,
'OLD POCHMANN': op,
'OPTIMAL': optimal_nxn,
'THISTLETHWAITE': (g1, g2, g3, g4),
'CROSS': cross,
'FIRST LAYER': (cross, layer1),
'F2L': (cross, f2l),
'OLL': (cross, f2l, oll),
'EDGES': op_edges,
'CORNERS': op_corners,
'EDGE ORIENTATION': g1,
'DOMINO REDUCTION': (g1, g2),
'HALF TURN REDUCTION': (g1, g2, g3)
},
'4+': {
'CAGE': (edges, centres, cfop),
'OPTIMAL': optimal_nxn,
'EDGES': edges,
'CENTRES': centres,
'REDUCTION': (edges, centres)
}
}
if size > 3:
size = '4+'
if size not in methods:
raise CubeError("Could not solve.")
scramble = cubes > 1
if method:
if isinstance(method[-1], int):
method, cubes = method[:-1], method[-1]
scramble = True
elif method[-1].isdecimal():
method, cubes = method[:-1], int(method[-1])
scramble = True
if method:
method = ' '.join(method).upper()
if method not in methods[size]:
raise ValueError
else:
method = next(iter(methods[size]))
if not cubes:
raise ValueError
start_time = default_timer()
stats = []
for _ in range(cubes):
if scramble:
self.scramble()
start_solve = default_timer()
if isinstance(methods[size][method], tuple):
solve = []
new_stats = {}
for step in methods[size][method]:
step_solve, step_stats = step(self)
if step_solve == False:
raise CubeError("Could not solve.")
solve.extend(step_solve)
new_stats.update(step_stats)
else:
solve, new_stats = methods[size][method](self)
if solve == False:
raise CubeError("Could not solve.")
new_stats.update(self.movecount(solve))
new_stats['TIME'] = default_timer() - start_solve
stats.append(new_stats)
if not scramble:
return solve, new_stats
solve_time = default_timer() - start_time
return cubes, (stats, solve_time)
```
#### File: cube/_solve/_thistlethwaite.py
```python
from .ida_star import ida_star
from cube.functions import orient
def g1(self) -> tuple:
"""
Solve edge orientation.
Returns
-------
tuple of (list of str, dict of {'G1': int})
Moves to solve edge orientation, statistics (move count in ETM).
Notes
-----
Brute-force method using `ida_star` to solve edge orientation.
This stage is complete when all 12 edges are correctly oriented. An
edge is correctly oriented when it can be moved into position in the
solved orientation without F, F', B, or B' moves.
This stage takes a maximum of 7 moves and reduces the cube into a
group requiring only `<U, D, L, R, F2, B2>` moves to solve.
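Examples
--------
Illustrative only; `cube` is assumed to be a 3x3 Cube exposing the
`solve` dispatcher:

    moves, stats = cube.solve('EDGE ORIENTATION')
    stats['G1']  # e.g. 5 (at most 7)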
"""
solve = orient(self.cube)
self.move(solve)
edges = (
((0, 0, 1), (4, 0, 1)),
((0, 1, 0), (1, 0, 1)),
((0, 1, -1), (3, 0, 1)),
((0, -1, 1), (2, 0, 1)),
((2, 1, 0), (1, 1, -1)),
((2, 1, -1), (3, 1, 0)),
((4, 1, 0), (3, 1, -1)),
((4, 1, -1), (1, 1, 0)),
((5, 0, 1), (2, -1, 1)),
((5, 1, 0), (1, -1, 1)),
((5, 1, -1), (3, -1, 1)),
((5, -1, 1), (4, -1, 1))
)
def get_bad_edges(cube):
bad_edges = []
for edge in edges:
sticker = cube[edge[0][0]][edge[0][1]][edge[0][2]]
if sticker in {'L', 'R'}:
bad_edges.append((edge[0][0], edge[1][0]))
elif sticker in {'F', 'B'}:
if cube[edge[1][0]][edge[1][1]][edge[1][2]] in {'U', 'D'}:
bad_edges.append((edge[0][0], edge[1][0]))
return bad_edges
def estimate(cube):
bad_edges = get_bad_edges(cube)
if not bad_edges:
return 0
total_edges = len(bad_edges)
edges = [sum(s in edge for edge in bad_edges) for s in (2, 4)]
minimum = min(edges)
maximum = max(edges)
if total_edges == 2:
return 3 + (maximum != 1)
if total_edges == 4:
return 5 - maximum
if total_edges == 6:
return 3 + (edges[0] != 3 != edges[1])
if total_edges == 8:
if maximum == 4 and minimum < 2:
return 7 - minimum
return 6 - minimum
if total_edges == 10:
return 6
return 7
def next_faces(cube, moves):
faces = ['F', 'B', 'U', 'R', 'L', 'D']
if moves:
face = moves[-1][0]
faces.remove(face)
if face in {'L', 'B', 'D'}:
faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
bad_edges = get_bad_edges(cube)
count = [set() for _ in range(6)]
for edge in bad_edges:
count[edge[0]].add(edge[1])
count[edge[1]].add(edge[0])
point_symmetry = []
for face in faces:
index = 'ULFRBD'.index(face)
if not count[index]:
faces.remove(face)
elif len(count[index]) == 4:
if face not in {'F', 'B'}:
faces.remove(face)
else:
point_symmetry.append(face)
elif count[index] in ({0,5}, {1,3}, {2,4}):
point_symmetry.append(face)
return ((face,) if face in point_symmetry else
(face, f'{face}2', f"{face}'") for face in faces)
solve.extend(ida_star(self, estimate, next_faces, 7))
return solve, {'G1': len(solve)}
def g2(self) -> tuple:
"""
Solve domino reduction.
Returns
-------
tuple of (list of str, dict of {'G2': int})
Moves to solve domino reduction, statistics (move count in ETM).
Notes
-----
Brute-force method using `ida_star` to solve domino reduction.
This stage is complete when both the top and bottom faces only have
white and yellow (U and D) stickers.
This stage takes a maximum of 10 moves and reduces the cube into a
group requiring only `<U, D, L2, R2, F2, B2>` moves to solve.
"""
def estimate(cube):
edges = [sum(cube[s][1][x] in {'U', 'D'} for s, x in pieces)
for pieces in (((2,0), (4,-1)), ((2,-1), (4,0)))]
total_edges = sum(edges)
corners = [sum(side[y][x] in {'U', 'D'} for y in (0, -1) for x in (0, -1))
for side in cube[1:5]]
for i in 0, 1:
if corners[i+2] < corners[i]:
corners[i], corners[i+2] = corners[i+2], corners[i]
total_corners = sum(corners)
if not total_edges:
if not total_corners:
return 0
return 7
if total_edges == 1:
if corners in ([0,1,0,3], [1,0,1,1]):
return 3
if corners in ([0,0,2,1], [0,1,0,1], [0,1,0,2], [1,0,2,1]):
return 5
return 6
if total_edges == 2:
if total_corners == 4:
if corners == [0,2,0,2]:
return 1 + (edges[0] == 1)
if corners == [1,1,1,1]:
return 2 + (edges[0] == 1)
if corners == [2,0,2,0]:
return 3 + (edges[0] == 1)
return 6
if not total_corners:
return 4 + (edges[0] == 1)
if corners in ([2,1,2,1], [0,4,0,4], [1,2,1,3]):
return 4 + (edges[0] == 1)
if corners in ([0,0,0,4], [2,0,2,0]):
return 4 + (edges[0] != 1)
return 5
if total_edges == 3:
if corners == [0,1,0,3]:
return 3
if corners in ([1,0,1,1], [1,1,1,3], [1,2,1,2], [2,1,2,1]):
return 4
return 5
if corners == [0,4,0,4]:
return 2
if corners in ([1,2,1,3], [2,2,2,2]):
return 3
if corners in ([2,1,2,1], [4,0,4,0]):
return 4
return 5
def next_faces(cube, moves):
faces = ['R', 'L', 'U', 'D', 'F', 'B']
if moves:
face = moves[-1][0]
faces.remove(face)
if face in {'L', 'B', 'D'}:
faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
ud = {'U', 'D'}
point_symmetry = []
corners = [
0 if cube[s][y][x] in ud else
2 if cube[i][s][s] in ud else
1 for s, y, x, i in (
(0,0,0,1), (0,-1,0,2), (0,-1,-1,3), (0,0,-1,4),
(-1,-1,0,4), (-1,0,0,1), (-1,0,-1,2), (-1,-1,-1,3)
)
]
for face in faces:
if face == 'U':
face_corners = corners[:4]
elif face == 'L':
face_corners = corners[:2] + corners[5:3:-1]
elif face == 'F':
face_corners = corners[1:3] + corners[6:4:-1]
elif face == 'R':
face_corners = corners[2:4] + corners[:5:-1]
elif face == 'B':
face_corners = [corners[i] for i in (0, 3, 7, 4)]
else:
face_corners = corners[4:]
if face_corners[:2] != face_corners[2:]:
continue
s = 'ULFRBD'.index(face)
if face in {'F', 'B'}:
y = {2: -1, 4: 0}[s]
if (cube[s][1][0] in ud) != (cube[s][1][-1] in ud):
continue
if (cube[0][y][1] in ud) != (cube[5][-y-1][1] in ud):
continue
faces.remove(face)
elif face in {'L', 'R'}:
y = {1: 0, 3: -1}[s]
if (cube[0][1][y] in ud) != (cube[5][1][y] in ud):
continue
if (cube[2][1][y] in ud) != (cube[4][1][-y-1] in ud):
continue
if ((cube[0][1][y] in ud) == (cube[2][1][y] in ud) and
corners[0] == (corners[1] + 1) % 3):
faces.remove(face)
else:
point_symmetry.append(face)
else:
if (cube[s][0][1] in ud) != (cube[s][-1][1] in ud):
continue
if (cube[s][1][0] in ud) != (cube[s][1][-1] in ud):
continue
if ((cube[s][0][1] in ud) == (cube[s][1][0] in ud) and
corners[0] == corners[1]):
faces.remove('U')
else:
point_symmetry.append('U')
return ((face,) if face in point_symmetry else
(f'{face}2',) if face in {'F', 'B'} else
(face, f'{face}2', f"{face}'") for face in faces)
solve = ida_star(self, estimate, next_faces, 10)
return solve, {'G2': len(solve)}
def g3(self) -> tuple:
"""
Solve half turn reduction.
Returns
-------
tuple of (list of str, dict of {'G3': int})
Moves to solve half turn reduction, statistics (move count in
ETM).
Notes
-----
Brute-force method using `ida_star` to solve half turn reduction.
This stage is complete when every face only contains two coloured
stickers and every face has an even number of corners of each
colour.
This stage takes a maximum of 13 moves and reduces the cube into a
group requiring only `<U2, D2, L2, R2, F2, B2>` moves to solve.
"""
def estimate(cube):
faces = 'FB', 'LR', 'FB', 'LR'
edges = [sum(cube[s][y][1] in face for s, face in enumerate(faces, 1))
for y in (0, -1)]
total_edges = sum(edges)
corners = [sum(cube[s][y][x] in {'F', 'B'} for s in (1, 3)
for x in (0, -1)) for y in (0, -1)]
total_corners = sum(corners)
if not total_corners:
if any(sum(side[y][x] == side[1][1] for y in (0, -1)
for x in (0, -1)) % 2 for side in cube[:3]):
return 10
if any(sum(cube[s][y][0] == cube[s][y][-1] for s in opposite) % 2
for y in (0, -1) for opposite in ((0,5), (1,3), (2,4))):
return 9
if not total_edges:
return 0
if total_edges == 2:
return 5
if total_edges == 4:
return 4
if total_edges == 6:
return 7
return 6
if total_corners == 2:
if total_edges == 2:
return 5
return 6
if total_corners == 4:
if max(corners) == 4:
if total_edges == 4:
if corners == edges:
return 1
if max(edges) == 4:
return 5
if edges[0] == 2:
return 3
return 4
return 7
if max(corners) == 3:
return 4
if total_edges == 4:
return 2
return 3
if total_corners == 6:
if corners[0] == 3:
return 5
return 6
if total_edges == 8:
return 2
if total_edges == 4:
return 4
if not total_edges:
return 6
return 5
def next_faces(cube, moves):
faces = ['U', 'D', 'R', 'L', 'F', 'B']
if moves:
face = moves[-1][0]
faces.remove(face)
if face in {'L', 'B', 'D'}:
faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
point_symmetry = []
edges = [side[y][1] in {'F', 'B'} for y in (0, -1)
for side in cube[1:5]]
corners = [cube[s][y][x] in {'F', 'B'} for y in (0, -1) for s in (1, 3)
for x in (0, -1)]
for face in faces:
if face in {'U', 'D'}:
s = {'U': 0, 'D': 4}[face]
if edges[s:s+2] != edges[s+2:s+4]:
continue
if corners[s:s+2] != corners[s+2:s+4]:
continue
if edges[s] == edges[s+1] and corners[s] != corners[s+1]:
faces.remove(face)
else:
point_symmetry.append(face)
else:
s = 'LFRB'.index(face)
if edges[s] != edges[s+4]:
continue
if corners[s] != corners[(s+1)%4+4]:
continue
if corners[(s+1)%4] != corners[(s-1)%4+4]:
continue
faces.remove(face)
return ((face,) if face in point_symmetry else
(face, f'{face}2', f"{face}'") if face in {'U', 'D'} else
(f'{face}2',) for face in faces)
solve = ida_star(self, estimate, next_faces, 13)
return solve, {'G3': len(solve)}
def g4(self) -> tuple:
"""
Solve half turn only state.
Returns
-------
tuple of (list of str, dict of {'G4': int})
Moves to solve half turn only state, statistics (move count in
ETM).
Notes
-----
Brute-force method using `ida_star` to solve half turn only state.
This stage takes a maximum of 15 moves and solves the cube.
"""
def estimate(cube):
axes = (
[cube[s][y if s != 4 else -y-1][1] != cube[s][1][1]
for s in (0, 2, 5, 4) for y in (0, -1)],
[side[1][x] != side[1][1] for side in cube[1:5] for x in (0, -1)],
[cube[s][y][x] != cube[s][1][1]
for s, y, x in ((0,1,0), (0,1,-1), (3,0,1), (3,-1,1),
(5,1,-1), (5,1,0), (1,-1,1), (1,0,1))]
)
edges = []
for axis in axes:
count = sum(axis)
if count == 2:
edges.append(1)
elif count == 4:
if not sum(axis[::2]) % 4:
edges.append(3)
elif not (sum(axis[i] for i in (1, 2, 5, 6))) % 4:
edges.append(3)
else:
edges.append(2)
elif count == 6:
edges.append(3)
elif count == 8:
edges.append(4)
else:
edges.append(0)
corners = [[side[y][x] != side[1][1] for y in (0, -1) for x in (0, -1)]
for side in cube[:3]]
count = sum(sum(side) for side in corners)
if count == 4:
if [0, 1, 1, 0] in corners or [1, 0, 0, 1] in corners:
if sorted(edges) == [1, 2, 3]:
if edges[(1 - corners.index([0, 0, 0, 0])) % 3] == 3:
return 4
return 6
if corners[0] == corners[2]:
if edges == [0, 1, 1]:
if corners[0][0]:
if axes[1][2] and axes[2][0]:
return 1
else:
if axes[1][3] and axes[2][1]:
return 1
return 5
if corners[1] == corners[2]:
if edges == [1, 0, 1]:
if corners[1][0]:
if axes[0][2] and axes[2][2]:
return 1
else:
if axes[0][3] and axes[2][3]:
return 1
return 5
if corners[0] == [corners[1][i] for i in (0, 2, 1, 3)]:
if edges == [1, 1, 0]:
if corners[0][0]:
if axes[0][0] and axes[1][0]:
return 1
else:
if axes[0][1] and axes[1][1]:
return 1
return 5
if sorted(edges) == [0, 1, 3]:
if edges[(1 - corners.index([0, 0, 0, 0])) % 3] == 3:
return 3
return 5
if count == 6:
if [0, 1, 1, 0] in corners or [1, 0, 0, 1] in corners:
return 3 if edges == [2, 2, 2] else 5
return 2 if sorted(edges) == [1, 1, 2] else 4
if count == 8:
if [0, 0, 0, 0] in corners:
if sorted(edges) == [0, 2, 2]:
if not edges[(1 - corners.index([0, 0, 0, 0])) % 3]:
return 2
return 4
if [0, 1, 1, 0] in corners or [1, 0, 0, 1] in corners:
return 4 if sorted(edges) == [2, 3, 3] else 5
return 3 if sorted(edges) == [1, 2, 3] else 5
return 0 if edges == [0, 0, 0] else 6
def next_faces(_, moves):
faces = ['U', 'D', 'R', 'L', 'F', 'B']
if moves:
face = moves[-1][0]
faces.remove(face)
if face in {'L', 'B', 'D'}:
faces.remove({'L': 'R', 'B': 'F', 'D': 'U'}[face])
return ((f'{face}2',) for face in faces)
solve = ida_star(self, estimate, next_faces, 15)
return solve, {'G4': len(solve)}
``` |
{
"source": "17mahera/Ruu-Bot",
"score": 3
} |
#### File: py/tests/test_erlpack_magic_method.py
```python
from erlpack import pack
from erlpack.types import Atom
class User(object):
def __init__(self, name, age):
self.name = name
self.age = age
def __erlpack__(self):
return {
Atom('name'): self.name,
Atom('age'): self.age
}
def test_erlpack_magic_method():
u = User('jake', 23)
assert pack(u) == '\x83t\x00\x00\x00\x02s\x03agea\x17s\x04namem\x00\x00\x00\x04jake'
```
#### File: py/tests/test_true.py
```python
from erlpack import pack
def test_true():
assert pack(True) == '\x83s\x04true'
``` |
{
"source": "17MartelEnterprises/Pandas3",
"score": 3
} |
#### File: Pandas3/pandas3/__init__.py
```python
from .utils import select
from io import StringIO, BytesIO
import pandas as pd
import requests
import boto3
import gzip
class Client(object):
"""Generates a boto3 client for working with Pandas3"""
def __init__(self, aws_access_key_id, aws_secret_access_key, region_name):
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.boto3_session = boto3.client('s3',
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name)
def list_buckets(self):
"""List all buckets for user"""
return self.boto3_session.list_buckets()
def list_files(self, bucket):
"""List all files in a bucket, excluding all `directories`"""
return [{'File Size': x['Size'],
'File Name': x['Key'],
'File Type': x['Key'][x['Key'].rfind('.')+1:].upper(),
'Last Updated': x['LastModified']}
for x in self.boto3_session.list_objects(Bucket=bucket)['Contents'] if x['Size'] > 0]
def select_df(self, bucket, file, query, header='Use', format='pandas'):
"""S3 Select returning Pandas dataframe or using SQL query."""
valid_headers = {'Use', 'None'}
if header not in valid_headers:
raise ValueError("headers must be one of %r." % valid_headers)
#TODO: query and header handling
def upload_df(self, bucket, df, file_name, compression=True):
"""Upload Pandas Dataframe to S3 storage"""
# write DF to string stream
csv_buffer = StringIO()
df.to_csv(csv_buffer, index=False)
file = csv_buffer.getvalue()
if not isinstance(compression, bool):
raise ValueError("compression variable must be boolean value")
if compression:
# reset stream position
csv_buffer.seek(0)
# create binary stream
gz_buffer = BytesIO()
# compress string stream using gzip
with gzip.GzipFile(mode='w', fileobj=gz_buffer) as gz_file:
gz_file.write(bytes(csv_buffer.getvalue(), 'utf-8'))
file = gz_buffer.getvalue()
# write stream to S3
files = {"file": file}
post = self.boto3_session.generate_presigned_post(
Bucket=bucket,
Key=file_name
)
response = requests.post(post["url"], data=post["fields"], files=files)
return response.text if response.text else "Upload Completed"
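# A minimal usage sketch (the credentials, region and bucket name below are placeholders):
#
#   client = Client('AWS_KEY_ID', 'AWS_SECRET_KEY', 'us-east-1')
#   client.list_buckets()
#   client.list_files('my-bucket')
#   client.upload_df('my-bucket', df, 'data.csv.gz', compression=True)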
``` |
{
"source": "17mcpc14/sna",
"score": 3
} |
#### File: 17mcpc14/sna/random.py
```python
import collections
import matplotlib.pyplot as plt
import networkx as nx
def distr(G):
degree_sequence = sorted([d for n, d in G.degree()], reverse=True) # degree sequence
degreeCount = collections.Counter(degree_sequence)
deg, cnt = zip(*degreeCount.items())
fig, ax = plt.subplots()
plt.bar(deg, cnt, width=0.80, color='b')
plt.title("Degree Distribution Histogram")
plt.ylabel("Count")
plt.xlabel("Degree")
ax.set_xticks([d + 0.45 for d in deg])
ax.set_xticklabels(deg)
plt.show()
degreetuples = list(zip(deg, cnt))
print("Degree distribution tuples (degree, count):", degreetuples)
print("Degree Distribution Analysis of a Random Network")
G = nx.gnp_random_graph(1000, 0.5, seed=None, directed=False)
distr(G)
print("Degree Distribution Analysis of Social network")
G = nx.read_edgelist('facebook_combined.txt',create_using=nx.DiGraph(),nodetype=int)
distr(G)
``` |
{
"source": "17media/pubnub-python",
"score": 3
} |
#### File: pubnub-python/pubnub/crypto.py
```python
import hashlib
import json
from Cryptodome.Cipher import AES
try:
from base64 import decodebytes, encodebytes
except ImportError:
from base64 import decodestring, encodestring
import sys
try:
from hashlib import sha256
digestmod = sha256
except ImportError:
import Cryptodome.Hash.SHA256 as digestmod
sha256 = digestmod.new
if sys.version_info > (3, 0):
v = 3
else:
v = 2
Initial16bytes = '0123456789012345'
def pad(msg, block_size=16):
padding = block_size - (len(msg) % block_size)
if v == 3:
return msg + (chr(padding) * padding).encode('utf-8')
else:
return msg + chr(padding) * padding
def depad(msg):
return msg[0:-ord(msg[-1])]
def get_secret(key):
if v == 3:
return hashlib.sha256(key.encode("utf-8")).hexdigest()
else:
return hashlib.sha256(key).hexdigest()
def encrypt(key, msg):
secret = get_secret(key)
if v == 3:
cipher = AES.new(bytes(secret[0:32], 'utf-8'), AES.MODE_CBC, bytes(Initial16bytes, 'utf-8'))
return encodebytes(cipher.encrypt(pad(msg.encode('utf-8')))).decode('utf-8').replace("\n", "")
else:
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
return encodestring(cipher.encrypt(pad(msg))).replace("\n", "")
def decrypt(key, msg):
secret = get_secret(key)
if v == 3:
cipher = AES.new(bytes(secret[0:32], 'utf-8'), AES.MODE_CBC, bytes(Initial16bytes, 'utf-8'))
plain = depad((cipher.decrypt(decodebytes(msg.encode('utf-8')))).decode('utf-8'))
else:
cipher = AES.new(secret[0:32], AES.MODE_CBC, Initial16bytes)
plain = depad(cipher.decrypt(decodestring(msg)))
try:
return json.loads(plain)
except Exception:
return plain
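# A minimal usage sketch (the key and message below are placeholders):
#
#   encrypted = encrypt('my_cipher_key', '{"text": "hello"}')
#   decrypt('my_cipher_key', encrypted)  # -> {'text': 'hello'} (JSON is parsed when possible)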
```
#### File: models/consumer/channel_group.py
```python
class PNChannelGroupsAddChannelResult(object):
pass
class PNChannelGroupsRemoveChannelResult(object):
pass
class PNChannelGroupsRemoveGroupResult(object):
pass
class PNChannelGroupsListResult(object):
def __init__(self, channels):
self.channels = channels
```
#### File: models/consumer/push.py
```python
class PNPushAddChannelResult(object):
pass
class PNPushRemoveChannelResult(object):
pass
class PNPushRemoveAllChannelsResult(object):
pass
class PNPushListProvisionsResult(object):
def __init__(self, channels):
self.channels = channels
```
#### File: tests/functional/test_publish.py
```python
import copy
import unittest
try:
from mock import MagicMock
except ImportError:
from unittest.mock import MagicMock
from pubnub.endpoints.pubsub.publish import Publish
from pubnub.pubnub import PubNub
from tests.helper import pnconf, sdk_name, url_encode
class TestPublish(unittest.TestCase):
def setUp(self):
self.sm = MagicMock(
get_next_sequence=MagicMock(return_value=2)
)
self.pubnub = MagicMock(
spec=PubNub,
config=pnconf,
sdk_name=sdk_name,
_publish_sequence_manager=self.sm
)
self.pubnub.uuid = "UUID_PublishUnitTest"
self.pub = Publish(self.pubnub)
def test_pub_message(self):
message = "hi"
encoded_message = url_encode(message)
self.pub.channel("ch1").message(message)
self.assertEquals(self.pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(self.pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
})
def test_pub_list_message(self):
self.pubnub.uuid = "UUID_PublishUnitTest"
message = ["hi", "hi2", "hi3"]
encoded_message = url_encode(message)
self.pub.channel("ch1").message(message)
self.assertEquals(self.pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(self.pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
})
def test_pub_with_meta(self):
self.pubnub.uuid = "UUID_PublishUnitTest"
message = ["hi", "hi2", "hi3"]
encoded_message = url_encode(message)
meta = ['m1', 'm2']
self.pub.channel("ch1").message(message).meta(meta)
self.assertEquals(self.pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(self.pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'meta': '["m1", "m2"]',
})
def test_pub_store(self):
self.pubnub.uuid = "UUID_PublishUnitTest"
message = ["hi", "hi2", "hi3"]
encoded_message = url_encode(message)
self.pub.channel("ch1").message(message).should_store(True)
self.assertEquals(self.pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(self.pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'store': '1',
})
def test_pub_do_not_store(self):
self.pubnub.uuid = "UUID_PublishUnitTest"
message = ["hi", "hi2", "hi3"]
encoded_message = url_encode(message)
self.pub.channel("ch1").message(message).should_store(False)
self.assertEquals(self.pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(self.pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': self.pubnub.uuid,
'store': '0',
})
def test_pub_with_auth(self):
conf = copy.copy(pnconf)
conf.auth_key = "my_auth"
pubnub = MagicMock(
spec=PubNub,
config=conf,
sdk_name=sdk_name,
uuid="UUID_PublishUnitTest",
_publish_sequence_manager=self.sm
)
pub = Publish(pubnub)
message = "hey"
encoded_message = url_encode(message)
pub.channel("ch1").message(message)
self.assertEquals(pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': pubnub.uuid,
'auth': conf.auth_key,
})
def test_pub_encrypted_list_message(self):
conf = copy.copy(pnconf)
conf.cipher_key = "testCipher"
pubnub = MagicMock(
spec=PubNub,
config=conf,
sdk_name=sdk_name,
uuid="UUID_PublishUnitTest",
_publish_sequence_manager=self.sm
)
pub = Publish(pubnub)
message = ["hi", "hi2", "hi3"]
encoded_message = "%22FQyKoIWWm7oN27zKyoU0bpjpgx49JxD04EI%2F0a8rg%2Fo%3D%22"
pub.channel("ch1").message(message)
self.assertEquals(pub.build_path(), "/publish/%s/%s/0/ch1/0/%s"
% (pnconf.publish_key, pnconf.subscribe_key, encoded_message))
self.assertEqual(pub.build_params_callback()({}), {
'pnsdk': sdk_name,
'uuid': pubnub.uuid,
})
```
#### File: integrational/native_sync/test_ssl.py
```python
import logging
import unittest
import pubnub
from pubnub.exceptions import PubNubException
from pubnub.models.consumer.pubsub import PNPublishResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import pn_vcr
pubnub.set_stream_logger('pubnub', logging.DEBUG)
class TestPubNubPublish(unittest.TestCase):
@pn_vcr.use_cassette('tests/integrational/fixtures/native_sync/ssl/ssl.yaml',
filter_query_parameters=['uuid'])
def test_publish_string_get(self):
pnconf = pnconf_copy()
pnconf.ssl = True
try:
env = PubNub(pnconf).publish() \
.channel("ch1") \
.message("hi") \
.sync()
assert isinstance(env.result, PNPublishResult)
assert env.result.timetoken > 1
except PubNubException as e:
self.fail(e)
```
#### File: integrational/native_threads/test_state.py
```python
import logging
import threading
import unittest
import pubnub
from pubnub.models.consumer.presence import PNSetStateResult, PNGetStateResult
from pubnub.pubnub import PubNub
from tests.helper import pnconf_copy
from tests.integrational.vcr_helper import pn_vcr
pubnub.set_stream_logger('pubnub', logging.DEBUG)
class TestPubNubState(unittest.TestCase):
def setUp(self):
self.event = threading.Event()
def callback(self, response, status):
self.response = response
self.status = status
self.event.set()
@pn_vcr.use_cassette('tests/integrational/fixtures/native_threads/state/state_of_single_channel.yaml',
filter_query_parameters=['uuid'], match_on=['state_object_in_query'])
def test_single_channel(self):
ch = "state-native-sync-ch"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "state-native-sync-uuid"
state = {"name": "Alex", "count": 5}
pubnub.set_state() \
.channels(ch) \
.state(state) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNSetStateResult)
assert self.response.state['name'] == "Alex"
assert self.response.state['count'] == 5
self.event.clear()
pubnub.get_state() \
.channels(ch) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNGetStateResult)
assert self.response.channels[ch]['name'] == "Alex"
assert self.response.channels[ch]['count'] == 5
@pn_vcr.use_cassette('tests/integrational/fixtures/native_threads/state/state_of_multiple_channels.yaml',
filter_query_parameters=['uuid'], match_on=['state_object_in_query'])
def test_multiple_channels(self):
ch1 = "state-native-sync-ch-1"
ch2 = "state-native-sync-ch-2"
pubnub = PubNub(pnconf_copy())
pubnub.config.uuid = "state-native-sync-uuid"
state = {"name": "Alex", "count": 5}
pubnub.set_state() \
.channels([ch1, ch2]) \
.state(state) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNSetStateResult)
assert self.response.state['name'] == "Alex"
assert self.response.state['count'] == 5
self.event.clear()
pubnub.get_state() \
.channels([ch1, ch2]) \
.async(self.callback)
self.event.wait()
assert not self.status.is_error()
assert isinstance(self.response, PNGetStateResult)
assert self.response.channels[ch1]['name'] == "Alex"
assert self.response.channels[ch1]['count'] == 5
assert self.response.channels[ch2]['name'] == "Alex"
assert self.response.channels[ch2]['count'] == 5
```
#### File: integrational/tornado/vcr_tornado_decorator.py
```python
import six
from tests.integrational.vcr_helper import pn_vcr
try:
from mock import patch
except ImportError:
from unittest.mock import patch
def use_cassette_and_stub_time_sleep(cassette_name, **kwargs):
context = pn_vcr.use_cassette(cassette_name, **kwargs)
full_path = "{}/{}".format(pn_vcr.cassette_library_dir, cassette_name)
cs = context.cls(path=full_path).load(path=full_path)
import tornado.gen
@tornado.gen.coroutine
def returner():
return
def _inner(f):
@patch('tornado.gen.sleep', return_value=returner())
@six.wraps(f)
def stubbed(*args, **kwargs):
with context:
largs = list(args)
# 1 - index
largs.pop(1)
return f(*largs, **kwargs)
@six.wraps(f)
def original(*args):
with context:
return f(*args)
return stubbed if len(cs) > 0 else original
return _inner
```
#### File: tests/unit/test_utils.py
```python
import unittest
from pubnub import utils
from pubnub.utils import build_url
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
class TestWriteValueAsString(unittest.TestCase):
def test_string(self):
assert utils.write_value_as_string("blah") == "\"blah\""
assert utils.write_value_as_string(u"blah") == "\"blah\""
def test_bool(self):
assert utils.write_value_as_string(False) == "false"
assert utils.write_value_as_string(True) == "true"
def test_list(self):
assert utils.write_value_as_string(["ch1", "ch2"]) == "[\"ch1\", \"ch2\"]"
def test_tuple(self):
assert utils.write_value_as_string(("ch1", "ch2")) == "[\"ch1\", \"ch2\"]"
class TestUUID(unittest.TestCase):
def test_uuid(self):
assert isinstance(utils.uuid(), str)
assert len(utils.uuid()) == 36
class TestBuildUrl(unittest.TestCase):
def test_build_url(self):
def match(expected_str, actual_str):
expected = urlparse(expected_str)
actual = urlparse(actual_str)
assert expected.scheme == actual.scheme
assert expected.netloc == actual.netloc
assert expected.path == actual.path
self.assertEqual(parse_qs(expected.query), parse_qs(actual.query))
match("http://ex.com/news?a=2&b=qwer",
build_url("http", "ex.com", "/news", "a=2&b=qwer"))
match("https://ex.com/?a=2&b=qwer",
build_url("https", "ex.com", "/", "a=2&b=qwer"))
class TestJoin(unittest.TestCase):
def test_join_items_and_encode(self):
assert "a%2Fb,c%20d" == utils.join_items_and_encode(['a/b', 'c d'])
class TestPreparePAMArguments(unittest.TestCase):
def test_prepare_pam_arguments(self):
params = {
'abc': True,
'poq': 4,
'def': False
}
result = utils.prepare_pam_arguments(params)
assert result == 'abc=True&def=False&poq=4'
def test_sign_sha_256(self):
input = """sub-c-7ba2ac4c-4836-11e6-85a4-0619f8945a4f
pub-c-98863562-19a6-4760-bf0b-d537d1f5c582
grant
channel=asyncio-pam-FI2FCS0A&pnsdk=PubNub-Python-Asyncio%252F4.0.2&r=1&timestamp=1468409553&uuid=a4dbf92e-e5cb-428f-b6e6-35cce03500a2&w=1""" # noqa: E501
result = utils.sign_sha256("my_key", input)
assert "ty5TgZtcl-wWkdNCbW--IHg_DPG7ryhfqxJnZhjmhD8=" == result
``` |
{
"source": "17minutes/AgentNet",
"score": 2
} |
#### File: agentnet/learning/dpg_n_step.py
```python
from __future__ import division, print_function, absolute_import
import theano
import theano.tensor as T
from lasagne.objectives import squared_error
from .helpers import get_n_step_value_reference, get_end_indicator
from ..utils.grad import consider_constant
def get_elementwise_objective_components(policy,
rewards,
policy_values,
action_values='same',
is_alive="always",
n_steps=None,
gamma_or_gammas=0.99,
crop_last = True,
force_values_after_end=True,
state_values_after_end="zeros",
consider_value_reference_constant=True,
consider_predicted_value_constant=True,
scan_dependencies=tuple(),
scan_strict=True,
):
"""
N-step Deterministic Policy Gradient (DPG) implementation.
Works with continuous action space (real value or vector of such)
Requires action policy(mu) and state values.
Based on
http://arxiv.org/abs/1509.02971
http://jmlr.org/proceedings/papers/v32/silver14.pdf
This particular implementation also allows N-step reinforcement learning
The code mostly relies on the same architecture as advantage actor-critic a2c_n_step
returns deterministic policy gradient components for actor and critic
L_policy = -critic(state,policy) = -action_values
L_V = (V - Vreference)^2
You will have to independently compute updates for actor and critic and then add them up.
parameters:
policy [batch,tick,action_id] - predicted "optimal policy" (mu)
rewards [batch,tick] - immediate rewards for taking actions at given time ticks
policy_values [batch,tick] - predicted state values given OPTIMAL policy
action_values [batch,tick] - predicted Q_values for committed actions INCLUDING EXPLORATION if any
Default value implies action_values = state_values if we have no exploration
is_alive [batch,tick] - whether given session is still active at given tick. Defaults to always active.
Default value of is_alive implies a simplified computation algorithm for Qlearning loss
n_steps: if an integer is given, the references are computed in loops of n_steps states.
Defaults to None: propagating rewards throughout the whole session.
If n_steps equals 1, this works exactly as one-step TD (though a less efficient implementation)
If you provide symbolic integer here AND strict = True, make sure you added the variable to dependencies.
gamma_or_gammas - a single value or array[batch,tick](can broadcast dimensions) of delayed reward discounts
crop_last - if True, zeros-out loss at final tick, if False - computes loss VS Qvalues_after_end
force_values_after_end - if true, sets reference values at session end to rewards[end] + gamma * state_values_after_end
state_values_after_end[batch,1,n_actions] - "next state values" for last tick used for reference only.
Defaults at T.zeros_like(state_values[:,0,None,:])
If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
scan_dependencies: everything you need to evaluate first 3 parameters (only if strict==True)
scan_strict: whether to evaluate values using strict theano scan or non-strict one
Returns:
Tuple of (policy_loss, state_value_loss): two element-wise [batch,tick] tensors, each masked by is_alive.
Sum them (with whatever weights you prefer) to obtain the full objective.
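A minimal usage sketch (the symbolic tensors are assumed to be built elsewhere):
    policy_loss, value_loss = get_elementwise_objective_components(
        policy, rewards, policy_values, is_alive=is_alive)
    total_loss = (policy_loss + value_loss).sum()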
"""
if action_values == 'same':
action_values = policy_values
# get reference values via DPG algorithm
reference_action_values = get_n_step_value_reference(action_values,
rewards,
is_alive,
n_steps=n_steps,
optimal_state_values_after_end=state_values_after_end,
gamma_or_gammas=gamma_or_gammas,
dependencies=scan_dependencies,
strict=scan_strict
)
if is_alive != "always" and force_values_after_end:
# if asked to force reference_Q[end_tick+1,a] = 0, do it
# note: if agent is always alive, this is meaningless
# set future rewards at session end to rewards+qvalues_after_end
end_ids = get_end_indicator(is_alive, force_end_at_t_max=True).nonzero()
if state_values_after_end == "zeros":
# "set reference state values at end action ids to just the immediate rewards"
reference_action_values = T.set_subtensor(reference_action_values[end_ids], rewards[end_ids])
else:
# "set reference state values at end action ids to the immediate rewards + qvalues after end"
new_subtensor_values = rewards[end_ids] + gamma_or_gammas * state_values_after_end[end_ids[0], 0]
reference_action_values = T.set_subtensor(reference_action_values[end_ids], new_subtensor_values)
# now compute the loss components
if is_alive == "always":
is_alive = T.ones_like(action_values, dtype=theano.config.floatX)
# actor loss
# here we rely on fact that state_values = critic(state,optimal_policy)
# using chain rule,
# grad(state_values,actor_weights) = grad(state_values, optimal_policy)*grad(optimal_policy,actor_weights)
policy_loss_elwise = -policy_values
# critic loss
reference_action_values = consider_constant(reference_action_values)
v_err_elementwise = squared_error(reference_action_values, action_values)
if crop_last:
v_err_elementwise = T.set_subtensor(v_err_elementwise[:,-1],0)
return policy_loss_elwise * is_alive, v_err_elementwise * is_alive
```
#### File: agentnet/learning/helpers.py
```python
from __future__ import division, print_function, absolute_import
from warnings import warn
import theano
import theano.tensor as T
from ..utils import insert_dim
def get_n_step_value_reference(state_values,
rewards,
is_alive="always",
n_steps=None,
gamma_or_gammas=0.99,
optimal_state_values="same_as_state_values",
optimal_state_values_after_end="zeros",
dependencies=tuple(),
crop_last=True,
strict=True):
"""
Computes the reference for state value function via n-step algorithm:
Vref(t) = r(t) + gamma*r(t+1) + gamma^2*r(t+2) + ... + gamma^n * V(t+n), where n == n_steps
Used by all n_step methods, including Q-learning, a2c and dpg
Works with both Q-values and state values, depending on aggregation_function
:param state_values: float[batch,tick] predicted state values V(s) at given batch session and time tick
:param rewards: float[batch,tick] rewards achieved by committing actions at [batch,tick]
:param is_alive: whether the session is still active int/bool[batch_size,time]
:param n_steps: if an integer is given, the references are computed in loops of n_steps
Every n_steps'th step, the reference is set to V = r + gamma * next_V_predicted.
On other steps, the reference is propagated recurrently: V = r + gamma * next_V_reference.
Defaults to None: propagating rewards throughout the whole session.
Widely known as "lambda" in RL community (TD-lambda, Q-lambda) plus or minus one :)
If n_steps equals 1, this works exactly as regular TD (though a less efficient one)
If you provide symbolic integer here AND strict = True, make sure you added the variable to dependencies.
:param gamma_or_gammas: delayed reward discount number, scalar or vector[batch_size]
:param optimal_state_values: state values given optimal actions.
- for Q-learning, it's max over Q-values
- for state-value based methods (a2c, dpg), it's same as state_values (defaults to that)
:param optimal_state_values_after_end: - symbolic expression for "next state values" for last tick used for reference only.
Defaults at T.zeros_like(values[:,0,None,:])
If you wish to simply ignore the last tick,
use defaults and crop output's last tick ( qref[:,:-1] )
:param dependencies: everything else you need to evaluate first 3 parameters (only if strict==True)
:param strict: whether to evaluate values using strict theano scan or non-strict one
:returns: V reference [batch,tick] according to n-step algorithms ~ eligibility traces
e.g. mentioned here http://arxiv.org/pdf/1602.01783.pdf as A3c and k-step Q-learning
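A rough sketch of the backward recursion being computed (illustrative only):
    Vref[t] = r[t] + gamma * next_V_predicted[t]   # every n_steps'th tick and the last tick
    Vref[t] = r[t] + gamma * Vref[t + 1]           # all other ticks
Ticks where is_alive == 0 are zeroed out.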
"""
# check dimensions
if state_values.ndim != 2:
if state_values.ndim == 3:
warn("""state_values must have shape [batch,tick] (ndim = 2).
Currently assuming the state_values you provided have shape [batch, tick, 1].
Working with state_values[:,:,0].
If that isn't what you intended, fix state_values shape to [batch,tick]""")
state_values = state_values[:, :, 0]
else:
raise ValueError("state_values must have shape [batch,tick] (ndim = 2),"
"while you have" + str(state_values.ndim))
# handle aggregation function
if optimal_state_values == "same_as_state_values":
optimal_state_values = state_values
# fill default values
if is_alive == "always":
is_alive = T.ones_like(rewards)
if crop_last:
#TODO rewrite by precomputing correct td-0 qvalues here to clarify notation
#alter tensors so that last reference = last prediction
is_alive = T.set_subtensor(is_alive[:,-1],1)
rewards = T.set_subtensor(rewards[:,-1],0)
next_state_values = T.concatenate([optimal_state_values[:, 1:] * is_alive[:, 1:],
state_values[:,-1:]/gamma_or_gammas], axis=1)
else:
#crop_last == False
if optimal_state_values_after_end == "zeros":
optimal_state_values_after_end = T.zeros_like(optimal_state_values[:, :1])
# get "Next state_values": floatX[batch,time] at each tick
# do so by shifting state_values backwards in time, pad with state_values_after_end
next_state_values = T.concatenate(
[optimal_state_values[:, 1:] * is_alive[:, 1:], optimal_state_values_after_end], axis=1)
# initialize each reference with ZEROS after the end (won't be in output tensor)
outputs_info = [T.zeros_like(rewards[:, 0]), ]
non_seqs = (gamma_or_gammas,) + tuple(dependencies)
if n_steps is None:
tmax_indicator = T.zeros((rewards.shape[1],),dtype='uint8')
tmax_indicator = T.set_subtensor(tmax_indicator[-1],1)
else:
time_ticks = T.arange(rewards.shape[1])
tmax_indicator = T.eq(time_ticks%n_steps,0)
tmax_indicator = T.set_subtensor(tmax_indicator[-1], 1).astype('uint8')
sequences = [rewards.T,
is_alive.T,
next_state_values.T, # transpose to iterate over time, not over batch
tmax_indicator.T]
# recurrent computation of reference state values (backwards through time)
def backward_V_step(rewards,
is_alive,
next_Vpred,
is_tmax,
next_Vref,
*args #you won't dare delete me
):
"""scan inner computation step, going backwards in time
params:
rewards, is_alive, next_Vpred, time_i - sequences
next_Vref - recurrent state value for next turn
returns:
current_Vref - recurrent state value at this turn
current_Vref is computed thus:
Once every n_steps or at session end:
current_Vref = r + gamma*next_Vpred #computation through next predicted state value
Otherwise:
current_Vref = r + gamma*next_Vref #recurrent computation through next Qvalue
"""
propagated_Vref = rewards + gamma_or_gammas * next_Vref # propagates value from actual next action
optimal_Vref = rewards + gamma_or_gammas * next_Vpred # uses agent's prediction for next state
# pick new_Vref if is_Tmax, else propagate existing one
chosen_Vref = T.switch(is_tmax, optimal_Vref, propagated_Vref)
# zero out references if session has ended already
this_Vref = T.switch(is_alive, chosen_Vref, 0.)
return this_Vref
reference_state_values = theano.scan(backward_V_step,
sequences=sequences,
non_sequences=non_seqs,
outputs_info=outputs_info,
go_backwards=True,
strict=strict
)[0] # shape: [time_seq_inverted, batch]
reference_state_values = reference_state_values.T[:, ::-1] # [batch, time_seq]
return reference_state_values
# minor helpers
def get_action_Qvalues(Qvalues, actions):
"""
Auxiliary function to select Q-values corresponding to actions taken.
Returns the predicted Q-values of the actions that were actually taken: float[batch,tick]
"""
batch_i = T.arange(Qvalues.shape[0])[:, None]
time_i = T.arange(Qvalues.shape[1])[None, :]
action_Qvalues_predicted = Qvalues[batch_i, time_i, actions]
return action_Qvalues_predicted
def get_end_indicator(is_alive, force_end_at_t_max=False):
"""
Auxiliary function to transform session alive indicator into end action indicator
If force_end_at_t_max is True, all sessions that didn't end by the end of recorded sessions
are ended at the last recorded tick."""
# session-ending action indicator: uint8[batch,tick]
is_end = T.eq(is_alive[:, :-1] - is_alive[:, 1:], 1)
if force_end_at_t_max:
session_ended_before = T.neq(T.sum(is_end, axis=1, keepdims=True),0)
is_end_at_tmax = 1 - T.gt(session_ended_before, 0)
else:
is_end_at_tmax = T.zeros((is_end.shape[0], 1), dtype=is_end.dtype)
is_end = T.concatenate([is_end, is_end_at_tmax], axis=1)
return is_end
def ravel_alive(is_alive, *args):
"""
Takes all is_alive ticks from all sessions and merges them into 1 dimension
"""
alive_selector = is_alive.nonzero()
return [arg[alive_selector] for arg in args]
```
#### File: agentnet/learning/qlearning_n_step.py
```python
from __future__ import division, print_function, absolute_import
import theano.tensor as T
from lasagne.objectives import squared_error
from .helpers import get_n_step_value_reference, get_end_indicator, get_action_Qvalues
from ..utils.grad import consider_constant
def get_elementwise_objective(Qvalues, actions, rewards,
is_alive="always",
Qvalues_target=None,
n_steps=None,
gamma_or_gammas=0.95,
crop_last=True,
force_qvalues_after_end=True,
optimal_qvalues_after_end="zeros",
consider_reference_constant=True,
aggregation_function=lambda qv: T.max(qv, axis=-1),
return_reference=False,
scan_dependencies=(),
scan_strict=True):
"""
Returns squared error between predicted and reference Q-values according to n-step Q-learning algorithm
Qreference(state,action) = reward(state,action) + gamma*reward(state_1,action_1) + ... + gamma^n * max[action_n]( Q(state_n,action_n) )
loss = mean over (Qvalues - Qreference)**2
:param Qvalues: [batch,tick,action_id] - predicted qvalues
:param actions: [batch,tick] - committed actions
:param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
:param is_alive: [batch,tick] - whether given session is still active at given tick. Defaults to always active.
Default value of is_alive implies a simplified computation algorithm for Qlearning loss
:param Qvalues_target: Older snapshot Qvalues (e.g. from a target network). If None, uses current Qvalues
:param n_steps: if an integer is given, the references are computed in loops of n_steps states.
Defaults to None: propagating rewards throughout the whole session.
If n_steps equals 1, this works exactly as one-step Q-learning (though a less efficient implementation)
If you provide symbolic integer here AND strict = True, make sure you added the variable to dependencies.
:param gamma_or_gammas: delayed reward discounts: a single value or array[batch,tick](can broadcast dimensions).
:param crop_last: if True, zeros-out loss at final tick, if False - computes loss VS Qvalues_after_end
:param force_qvalues_after_end: if true, sets reference Qvalues at session end to rewards[end] + qvalues_after_end
:param optimal_qvalues_after_end: [batch,1] - symbolic expression for "best next state q-values" for last tick
used when computing reference Q-values only.
Defaults at T.zeros_like(Q-values[:,0,None,0])
If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
:param consider_reference_constant: whether or not zero-out gradient flow through reference_Qvalues
(True is highly recommended)
:param aggregation_function: a function that takes all Qvalues for "next state Q-values" term and returns what
is the "best next Q-value". Normally you should not touch it. Defaults to max over actions.
Normally you shouldn't touch this
Takes input of [batch,n_actions] Q-values
:param return_reference: if True, returns reference Qvalues.
If False, returns squared_error(action_Qvalues, reference_Qvalues)
:param scan_dependencies: everything you need to evaluate first 3 parameters (only if strict==True)
:param scan_strict: whether to evaluate Qvalues using strict theano scan or non-strict one
:return: element-wise squared error between predicted and reference Q-values, masked by is_alive
(or the reference Q-values themselves if return_reference is True)
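A minimal usage sketch (the symbolic tensors are assumed to be built elsewhere):
    elwise_loss = get_elementwise_objective(qvalues, actions, rewards,
                                            is_alive=is_alive, n_steps=3)
    loss = elwise_loss.sum(axis=1).mean()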
"""
if Qvalues_target is None:
Qvalues_target = Qvalues
assert Qvalues.ndim == Qvalues_target.ndim == 3
assert actions.ndim == rewards.ndim ==2
if is_alive != 'always': assert is_alive.ndim==2
# get Qvalues of best actions (used every K steps for reference Q-value computation
optimal_Qvalues_target = aggregation_function(Qvalues_target)
# get predicted Q-values for committed actions by both current and target networks
# (to compare with reference Q-values and use for recurrent reference computation)
action_Qvalues = get_action_Qvalues(Qvalues, actions)
action_Qvalues_target = get_action_Qvalues(Qvalues_target, actions)
# get reference Q-values via Q-learning algorithm
reference_Qvalues = get_n_step_value_reference(
state_values=action_Qvalues_target,
rewards=rewards,
is_alive=is_alive,
n_steps=n_steps,
gamma_or_gammas=gamma_or_gammas,
optimal_state_values=optimal_Qvalues_target,
optimal_state_values_after_end=optimal_qvalues_after_end,
dependencies=scan_dependencies,
strict=scan_strict,
crop_last=crop_last,
)
if consider_reference_constant:
# do not pass gradient through reference Qvalues (since they DO depend on Qvalues by default)
reference_Qvalues = consider_constant(reference_Qvalues)
if force_qvalues_after_end and is_alive != "always":
# if asked to force reference_Q[end_tick+1,a] = 0, do it
# note: if agent is always alive, this is meaningless
# set future rewards at session end to rewards+qvalues_after_end
end_ids = get_end_indicator(is_alive, force_end_at_t_max=True).nonzero()
if optimal_qvalues_after_end == "zeros":
# "set reference Q-values at end action ids to just the immediate rewards"
reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids], rewards[end_ids])
else:
# "set reference Q-values at end action ids to the immediate rewards + qvalues after end"
new_reference_values = rewards[end_ids] + gamma_or_gammas * optimal_qvalues_after_end
reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids], new_reference_values[end_ids[0], 0])
#If asked, make sure loss equals 0 for the last time-tick.
if crop_last:
reference_Qvalues = T.set_subtensor(reference_Qvalues[:,-1],action_Qvalues[:,-1])
if return_reference:
return reference_Qvalues
else:
# tensor of elementwise squared errors
elwise_squared_error = squared_error(reference_Qvalues, action_Qvalues)
return elwise_squared_error * is_alive
```
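For orientation, here is a minimal NumPy sketch of the one-step Q-learning reference that this loss is built around; it is an illustration only (array shapes follow the [batch,tick,action] convention from the docstring), not the Theano implementation above:
```python
import numpy as np

# Toy shapes: batch=2, tick=3, n_actions=4; all values are made up for illustration.
q_target = np.random.rand(2, 3, 4)   # Q-values from a (frozen) target network
rewards = np.random.rand(2, 3)       # immediate rewards r[t]
gamma = 0.99

# One-step reference: Qref[t] = r[t] + gamma * max_a' Q_target(s[t+1], a')
best_next_q = q_target.max(axis=-1)                  # [batch,tick]
q_reference = rewards.copy()
q_reference[:, :-1] += gamma * best_next_q[:, 1:]    # the last tick has no successor here
print(q_reference.shape)                             # (2, 3)
```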
#### File: agentnet/learning/sarsa.py
```python
from __future__ import division, print_function, absolute_import
import theano.tensor as T
import theano
import numpy as np
from lasagne.objectives import squared_error
from .helpers import get_end_indicator, get_action_Qvalues
from ..utils.grad import consider_constant
from ..utils import create_shared
default_gamma = create_shared('sarsa_gamma_default', np.float32(0.99), theano.config.floatX)
def get_reference_Qvalues(Qvalues,
actions,
rewards,
gamma_or_gammas=default_gamma,
qvalues_after_end="zeros"
):
"""
Returns reference Qvalues according to State-Action-Reward-State-Action (SARSA) algorithm
:param Qvalues: [batch,tick,action_id] - predicted Q-values
:param actions: [batch,tick] - committed actions
:param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
:param gamma_or_gammas: a single value or array[batch,tick](can broadcast dimensions) of delayed reward discounts
:param qvalues_after_end: symbolic expression for "future rewards" term for last tick used for reference only.
Defaults at T.zeros_like(rewards[:,0,None])
If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
:return: Qreference - reference Q-values at [batch,tick] using formula
Q reference [batch,action_at_tick] = rewards[t] + gamma_or_gammas * Qs(t+1,action[t+1])
Where action[t+1] is simply action that agent took at next time tick [padded with qvalues_after_end]
"""
if qvalues_after_end == "zeros":
qvalues_after_end = T.zeros_like(rewards[:, 0, None])
# Q-values for "next" states (missing last tick): float[batch,tick-1,action]
next_Qvalues_predicted = Qvalues[:, 1:]
# actions committed at next ticks (missing last tick): int[batch,tick-1]
next_actions = actions[:, 1:]
future_rewards_estimate = get_action_Qvalues(next_Qvalues_predicted, next_actions)
# adding the last tick
future_rewards_estimate = T.concatenate(
[
future_rewards_estimate,
qvalues_after_end,
],
axis=1
)
# full Q-value formula (SARSA algorithm)
reference_Qvalues = rewards + gamma_or_gammas * future_rewards_estimate
return reference_Qvalues
def get_elementwise_objective(Qvalues,
actions,
rewards,
is_alive="always",
Qvalues_target=None,
gamma_or_gammas=0.95,
crop_last=True,
force_qvalues_after_end=True,
qvalues_after_end="zeros",
consider_reference_constant=True, ):
"""
Returns squared error between predicted and reference Qvalues according to the SARSA algorithm
Qreference(state,action) = reward(state,action) + gamma* Q(next_state,next_action)
loss = mean over (Qvalues - Qreference)**2
:param Qvalues: [batch,tick,action_id] - predicted qvalues
:param actions: [batch,tick] - committed actions
:param rewards: [batch,tick] - immediate rewards for taking actions at given time ticks
:param is_alive: [batch,tick] - whether given session is still active at given tick. Defaults to always active.
Default value of is_alive implies a simplified computation algorithm for Qlearning loss
:param Qvalues_target: Older snapshot Qvalues (e.g. from a target network). If None, uses current Qvalues
:param gamma_or_gammas: a single value or array[batch,tick](can broadcast dimensions) of delayed reward discounts
:param crop_last: if True, zeroes out the loss at the final tick; if False, computes the loss against Qvalues_after_end
:param force_qvalues_after_end: if True, sets reference Qvalues at session end to rewards[end] + qvalues_after_end
:param qvalues_after_end: [batch,1,n_actions] - symbolic expression for "next state q-values" for last tick used for reference only.
Defaults at T.zeros_like(Qvalues[:,0,None,:])
If you wish to simply ignore the last tick, use defaults and crop output's last tick ( qref[:,:-1] )
:param consider_reference_constant: whether or not to zero out gradient flow through reference_Qvalues
(True is highly recommended unless you know what you're doing)
:return: tensor [batch, tick] of squared errors over Qvalues (using formula above for loss)
"""
if Qvalues_target is None:
Qvalues_target = Qvalues
assert Qvalues.ndim == Qvalues_target.ndim == 3
assert actions.ndim == rewards.ndim ==2
if is_alive != 'always': assert is_alive.ndim==2
# get reference Qvalues via the SARSA algorithm
reference_Qvalues = get_reference_Qvalues(Qvalues_target, actions, rewards,
gamma_or_gammas=gamma_or_gammas,
qvalues_after_end=qvalues_after_end,
)
if consider_reference_constant:
# do not pass gradient through reference Q-values (since they DO depend on Q-values by default)
reference_Qvalues = consider_constant(reference_Qvalues)
# get predicted qvalues for committed actions (to compare with reference Q-values)
action_Qvalues = get_action_Qvalues(Qvalues, actions)
# if agent is always alive, return the simplified loss
if is_alive == "always":
# tensor of element-wise squared errors
elwise_squared_error = squared_error(reference_Qvalues, action_Qvalues)
else:
# we are given an is_alive matrix : uint8[batch,tick]
# if asked to force reference_Q[end_tick+1,a] = 0, do it
# note: if agent is always alive, this is meaningless
if force_qvalues_after_end:
# set future rewards at session end to rewards + qvalues_after_end
end_ids = get_end_indicator(is_alive, force_end_at_t_max=True).nonzero()
if qvalues_after_end == "zeros":
# "set reference Q-values at end action ids to just the immediate rewards"
reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids], rewards[end_ids])
else:
last_optimal_rewards = T.zeros_like(rewards[:, 0])
# "set reference Q-values at end action ids to the immediate rewards + qvalues after end"
reference_Qvalues = T.set_subtensor(reference_Qvalues[end_ids],
rewards[end_ids] + gamma_or_gammas * last_optimal_rewards[
end_ids[0], 0]
)
# tensor of element-wise squared errors
elwise_squared_error = squared_error(reference_Qvalues, action_Qvalues)
# zero-out loss after session ended
elwise_squared_error = elwise_squared_error * is_alive
if crop_last:
elwise_squared_error = T.set_subtensor(elwise_squared_error[:,-1],0)
return elwise_squared_error
```
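A minimal NumPy sketch of the SARSA reference Qref[t] = r[t] + gamma * Q(t+1, a[t+1]) that get_reference_Qvalues computes above, with zeros used as the after-end term (mirroring the "zeros" default); it is illustrative only, not the Theano code itself:
```python
import numpy as np

q = np.random.rand(2, 3, 4)                     # predicted Q-values [batch,tick,action]
actions = np.random.randint(0, 4, size=(2, 3))  # committed actions [batch,tick]
rewards = np.random.rand(2, 3)
gamma = 0.95

# Q-value of the action actually taken at the *next* tick, padded with zeros at the end.
batch_idx = np.arange(2)[:, None]
next_action_q = q[batch_idx, np.arange(1, 3)[None, :], actions[:, 1:]]   # [batch,tick-1]
next_action_q = np.concatenate([next_action_q, np.zeros((2, 1))], axis=1)

q_reference = rewards + gamma * next_action_q   # SARSA reference [batch,tick]
```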
#### File: agentnet/resolver/__init__.py
```python
from .base import *
from .epsilon_greedy import *
from .probabilistic import *
def ProbablisticResolver(*args,**kwargs):
raise ValueError("Use Probabilistic resolver (with i)")
```
#### File: agentnet/utils/format.py
```python
from collections import OrderedDict
from warnings import warn
import lasagne
import numpy as np
def is_layer(var):
"""checks if var is lasagne layer"""
return isinstance(var, lasagne.layers.Layer)
def is_theano_object(var):
"""checks if var is a theano input, transformation, constant or shared variable"""
return type(var).__module__.startswith("theano")
def is_numpy_object(var):
"""checks if var is a theano input, transformation, constant or shared variable"""
return type(var).__module__.startswith("numpy")
supported_sequences = (tuple, list)
def check_sequence(variables):
"""
Ensure that variables is one of supported_sequences or converts to one.
If naive conversion fails, throws an error.
"""
if any(isinstance(variables, seq) for seq in supported_sequences):
return variables
else:
# If it is a numpy or theano array, excluding numpy array of objects, return a list with single element
# Yes, i know it's messy. Better options are welcome for pull requests :)
if (is_theano_object(variables) or is_numpy_object(variables)) and variables.dtype != np.object:
return [variables]
elif hasattr(variables, '__iter__'):
# Otherwise, if it is a different kind of sequence, try casting it to a tuple.
# If that fails, it will be treated as an atomic object.
try:
tupled_variables = tuple(variables)
message = """{variables} of type {var_type} will be treated as a sequence of {len_casted} elements,
not a single element.
If you want otherwise, please pass it as a single-element list/tuple.
"""
warn(message.format(variables=variables, var_type=type(variables), len_casted=len(tupled_variables)))
return tupled_variables
except:
message = """
{variables} of type {var_type} will be treated as a single input/output tensor,
and not a collection of such.
If you want otherwise, please cast it to list/tuple.
"""
warn(message.format(variables=variables, var_type=type(variables)))
return [variables]
else:
# otherwise it's a one-element list
return [variables]
def check_list(variables):
"""Ensure that variables is a list or converts to one.
If naive conversion fails, throws an error
:param variables: sequence expected
"""
return list(check_sequence(variables))
def check_tuple(variables):
"""Ensure that variables is a list or converts to one.
If naive conversion fails, throws an error
:param variables: sequence expected
"""
return tuple(check_sequence(variables))
def check_ordered_dict(variables):
"""Ensure that variables is an OrderedDict
:param variables: dictionary expected
"""
assert isinstance(variables, dict)
try:
return OrderedDict(list(variables.items()))
except:
raise ValueError("Could not convert {variables} to an ordered dictionary".format(variables=variables))
def unpack_list(array, parts_lengths):
"""
Returns slices of the input list a.
unpack_list(a, [2,3,5]) -> a[:2], a[2:2+3], a[2+3:2+3+5]
:param array: array-like or tensor variable
:param parts_lengths: lengths of subparts
"""
borders = np.concatenate([[0], np.cumsum(parts_lengths)])
groups = []
for low, high in zip(borders[:-1], borders[1:]):
groups.append(array[low:high])
return groups
```
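A quick, runnable illustration of unpack_list defined above (assuming it is imported from this module):
```python
import numpy as np  # unpack_list itself relies on numpy for the cumulative sums

a = list(range(10))
head, mid, tail = unpack_list(a, [2, 3, 5])
print(head, mid, tail)   # [0, 1] [2, 3, 4] [5, 6, 7, 8, 9]
```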
#### File: agentnet/utils/tensor_ops.py
```python
from theano import tensor as T
def norm(x, axis=-1, keepdims=True):
"""Compute l2 norm of x along axis"""
return T.sqrt((x ** 2).sum(axis=axis, keepdims=keepdims))
def normalize(x, axis=-1):
"""return x divided by norm(x)
If an element has zero norm, normalized element will still be zeros"""
norms = norm(x, axis=axis, keepdims=True)
return T.switch(T.eq(norms, 0), 0, x / norms)
def linspace(start, stop, num_units, dtype="float32"):
"""a minimalistic symbolic equivalent of numpy.linspace"""
return start + T.arange(num_units, dtype=dtype) * (stop - start) / (num_units - 1)
def in1d(arr, in_arr):
"""for each element in arr returns 1 if in_arr contains this element, otherwise 0
Output shape matches arr shape, in_arr must be 1d"""
return T.eq(arr.reshape([1, -1]), in_arr.reshape([-1, 1])).any(axis=0).reshape(arr.shape)
def prefix_ravel(seq, n_raveled_dim=2):
"""ravels first n_raveled_dimensions of seq into one
p.e. if you have dimemsions of [batch_size,time_step,n_units],
than prefix_ravel with 2 raveled dimensions will have [batch_and_time,n_units] dimension"""
new_ndim = seq.ndim - n_raveled_dim + 1
new_shape = T.concatenate([[-1], seq.shape[n_raveled_dim:]])
return seq.reshape(new_shape, ndim=new_ndim)
def append_dim(arg):
"""add 1 fake dimension to the end of arg"""
return arg.reshape([i for i in arg.shape] + [1])
def insert_dim(arg, pos):
"""insert 1 fake dimension inside the arg before pos'th dimension"""
shape = [i for i in arg.shape]
shape.insert(pos, 1)
return arg.reshape(shape)
``` |
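As a rough NumPy analogue of two of the helpers above (an illustration of the intended semantics, not the Theano functions themselves):
```python
import numpy as np

# normalize(): rows are divided by their L2 norm; all-zero rows stay zero.
x = np.array([[3.0, 4.0], [0.0, 0.0]])
norms = np.sqrt((x ** 2).sum(axis=-1, keepdims=True))
safe = np.where(norms == 0, 1, norms)
print(np.where(norms == 0, 0.0, x / safe))   # [[0.6 0.8] [0.  0. ]]

# insert_dim(arg, 1) corresponds to adding a length-1 axis before position 1.
y = np.arange(6).reshape(2, 3)
print(np.expand_dims(y, 1).shape)            # (2, 1, 3)
```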
{
"source": "17twenty/PythonUnitTesting",
"score": 3
} |
#### File: 17twenty/PythonUnitTesting/football.py
```python
import unittest
import libdiffdata as ld
def grabGoalsFA(line):
tokens = line.split()
return (int(tokens[6]), int(tokens[8]))
def collateFAFromFile(filename):
lines = open(filename).readlines()
del lines[0]
return [grabGoalsFA(line) for line in lines if "--" not in line]
def lowestGoalDiff(filename):
return ld.indexOfLowestVariance(collateFAFromFile(filename))
#
# Tests
#
class FootballKataTestSuite(unittest.TestCase):
def testLineParsing(self):
self.assertEqual(grabGoalsFA(" 2. Manchester_U 38 28 5 5 89 - 33 89"), (89, 33), "Parse goal scores")
def testReadAndParseFile(self):
foo = collateFAFromFile("football.txt")
self.assertEqual(foo[0], (93, 29), "Tuple mismatch for expected first goal FA")
self.assertEqual(foo[-1], (40, 82), "Tuple mismatch for expected last goal FA")
def testIndexOfLowestDiff(self):
self.assertEqual(lowestGoalDiff("football.txt"), 13, "Find the right diff from test data set")
if __name__ == '__main__':
unittest.main()
```
#### File: 17twenty/PythonUnitTesting/libdiffdata.py
```python
import unittest
def indexOfMin(values):
return values.index(min(values))
def indexOfMinDiffs(diffs):
return indexOfMin([abs(a-b) for a,b in diffs])
def indexOfLowestVariation(lowGoals, highGoals):
""" Returns index of variation between list of goals for, goals against """
tuples = zip(highGoals, lowGoals)
return indexOfMinDiffs(tuples)
def indexOfLowestVariance(dataset):
return indexOfMinDiffs(dataset)+1
def parseLine():
pass
#
# Tests
#
class LibDiffDataTestSuite(unittest.TestCase):
def testVariation(self):
self.assertEqual(indexOfLowestVariation([1,1,1],[3,2,3]), 1, "lowest variation found in dataset")
def testIndexOfMin(self):
self.assertEqual(indexOfMin([2,3,4,1,2,3,4]), 3, "Index of minimum value")
def testDataSet(self):
testData = [(93, 29), (89, 33)]
self.assertEqual(indexOfLowestVariance(testData), 2, "Index of Lowest score failed")
def testParseAndExtract(self):
self.assertEqual(parseLine("a b 23 2 ob", 2,4, (23,"ob")), (), "")
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "17ucs091/ReUnite",
"score": 3
} |
#### File: Backend/application/__init__.py
```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS,cross_origin
"""
Setting up the database
"""
db = SQLAlchemy()
def create_app():
"""
Application factory: construct and configure the core Flask application.
"""
app = Flask(__name__)
CORS(app)
app.config.from_object('config.Config')
"""
Initializing the sqlalchemy db with the app
"""
db.init_app(app)
"""
Creating the app context
"""
with app.app_context():
from . import routes
db.create_all()
return app
``` |
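A minimal sketch of how this factory might be launched from an entry-point script; the wsgi.py name, host and port below are assumptions for illustration, not part of the original repository:
```python
# wsgi.py (hypothetical entry point)
from application import create_app

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)
```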
{
"source": "17user/aidns",
"score": 3
} |
#### File: app/auth/functions.py
```python
def parse(roll):
branch_dict = {
'00': 'CE',
'10': 'CSE',
'13': 'IT',
'30': 'EE',
'43': 'ME'
}
year = '20' + roll[0:2]
t = roll[5:7]
branch = branch_dict[t]
return year, branch
``` |
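For illustration, a hedged example of calling parse(), assuming a roll-number layout in which characters 0-1 carry the admission year and characters 5-6 carry the branch code; the roll string below is made up:
```python
year, branch = parse("1712310045")   # hypothetical roll number
print(year, branch)                  # 2017 CSE  ('20' + '17', branch_dict['10'])
```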
{
"source": "17zhangw/featurewiz",
"score": 2
} |
#### File: featurewiz/featurewiz/stacking_models.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import ElasticNet, Lasso, BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler,StandardScaler,OneHotEncoder
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error,auc
from sklearn.svm import LinearSVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import AdaBoostClassifier
import warnings
warnings.filterwarnings('ignore')
def rmse(y_true,y_pred):
return np.sqrt(mean_squared_error(y_true,y_pred))
##################################################
### Define the input models here #######
###################################################
class Stacking_Classifier(BaseEstimator, RegressorMixin, TransformerMixin):
"""
############ Credit for Blending Regressor ############
#### Greatly indebted to <NAME> who created Blending Regressor
#### https://gilberttanner.com/blog/introduction-to-ensemble-learning
#### I have modifed his code to create a Stacking Classifier #########
#######################################################################
"""
def __init__(self):
n_folds = 3
logit = LogisticRegression(C=1.0, random_state = 1, max_iter=5000)
DT = DecisionTreeClassifier(max_depth=10, random_state = 3)
GBoost = LinearSVC(random_state=99)
model_rf = RandomForestClassifier(max_depth=10,n_estimators=100,
random_state=99)
xgbc = AdaBoostClassifier(random_state=0)
gpc = MLPClassifier(hidden_layer_sizes=50, random_state=0)
base_models = (logit, model_rf, DT, GBoost, xgbc, gpc)
meta_model = DT
self.base_models = base_models
self.meta_model = meta_model
self.n_folds = n_folds
# We again fit the data on clones of the original models
def fit(self, X, y):
self.base_models_ = [list() for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
# Train cloned base models then create out-of-fold predictions
# that are needed to train the cloned meta-model
out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models):
for train_index, holdout_index in kfold.split(X, y):
instance = clone(model)
self.base_models_[i].append(instance)
instance.fit(X.iloc[train_index], y.iloc[train_index])
y_pred = instance.predict(X.iloc[holdout_index])
out_of_fold_predictions[holdout_index, i] = y_pred
# Now train the cloned meta-model using the out-of-fold predictions as new feature
self.meta_model_.fit(out_of_fold_predictions, y)
return self
#Do the predictions of all base models on the test data and use the averaged predictions as
#meta-features for the final prediction which is done by the meta-model
def predict(self, X):
meta_features = np.column_stack([
stats.mode(np.column_stack([model.predict(X) for model in base_models]), axis=1)[0]
for base_models in self.base_models_ ])
return self.meta_model_.predict(meta_features)
###################################################################
from sklearn.model_selection import train_test_split
import pathlib
from scipy import stats
from scipy.stats import norm, skew
from sklearn.linear_model import Lasso
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
import xgboost as xgb
class Blending_Regressor(BaseEstimator, RegressorMixin, TransformerMixin):
"""
############ Credit for Blending Regressor ############
#### Greatly indebted to <NAME> who created Blending Regressor
#### https://gilberttanner.com/blog/introduction-to-ensemble-learning
#### I have modified his code to create a Blending Regressor #########
#######################################################################
"""
def __init__(self, holdout_pct=0.2, use_features_in_secondary=False):
# create models
lasso_model = make_pipeline(RobustScaler(), Lasso(alpha=0.0005, random_state=1))
rf = RandomForestRegressor()
gbr = GradientBoostingRegressor()
xgb_model = xgb.XGBRegressor()
base_models = [gbr, rf, xgb_model, lasso_model]
meta_model = lasso_model
self.base_models = base_models
self.meta_model = meta_model
self.holdout_pct = holdout_pct
self.use_features_in_secondary = use_features_in_secondary
def fit(self, X, y):
self.base_models_ = [clone(x) for x in self.base_models]
self.meta_model_ = clone(self.meta_model)
X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=self.holdout_pct)
holdout_predictions = np.zeros((X_holdout.shape[0], len(self.base_models)))
for i, model in enumerate(self.base_models_):
model.fit(X_train, y_train)
y_pred = model.predict(X_holdout)
holdout_predictions[:, i] = y_pred
if self.use_features_in_secondary:
self.meta_model_.fit(np.hstack((X_holdout, holdout_predictions)), y_holdout)
else:
self.meta_model_.fit(holdout_predictions, y_holdout)
return self
def predict(self, X):
meta_features = np.column_stack([
model.predict(X) for model in self.base_models_
])
if self.use_features_in_secondary:
return self.meta_model_.predict(np.hstack((X, meta_features)))
else:
return self.meta_model_.predict(meta_features)
######################################################################################
``` |
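A minimal usage sketch for the Stacking_Classifier defined above, assuming scikit-learn's toy data helpers; since fit() indexes with .iloc, the inputs are wrapped in pandas objects first:
```python
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=300, n_features=10, random_state=0)
X, y = pd.DataFrame(X), pd.Series(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = Stacking_Classifier()
clf.fit(X_train.reset_index(drop=True), y_train.reset_index(drop=True))
preds = clf.predict(X_test)
print("accuracy:", (preds == y_test.values).mean())
```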
{
"source": "17zhangw/peloton",
"score": 2
} |
#### File: testing/sqlite_trace/trace-replay.py
```python
import os
import re
import sys
import socket
import time
import psycopg2
from optparse import OptionParser
from config import *
from subprocess import Popen
def extract_sql(input_path, output_path):
infile = open(input_path)
outfile = open(output_path, "w")
new_statement = True
wait_for_blank = False
query = ""
for line in infile:
line = line.strip() + " "
if line == " ":
if not wait_for_blank:
outfile.write(query + ";\n")
new_statement = True
wait_for_blank = False
query = ""
continue
if wait_for_blank:
continue
l = line.strip().split()
if l[0] in ['#', 'statement', 'query', 'halt', 'hash-threshold']:
continue
if l[0] in ['onlyif', 'skipif']:
wait_for_blank = True
continue
if l[0] == '----':
outfile.write(query + ";\n")
wait_for_blank = True
continue
# Multi-line query
query += line
for kw in kw_filter:
if re.search(kw, line.upper()):
wait_for_blank = True
break
## FOR
infile.close()
outfile.close()
# DEF
def get_pg_jdbc():
return "jdbc:postgresql://127.0.0.1:5433/%s" % pg_database
# DEF
def get_peloton_jdbc():
return "jdbc:postgresql://127.0.0.1:%s/" % peloton_port
# DEF
def gen_config(path):
conf = open(path, "w")
conf.write(get_peloton_jdbc() + "\n")
conf.write(peloton_username + "\n")
conf.write(peloton_password + "\n")
conf.write(get_pg_jdbc() + "\n")
conf.write(pg_username + "\n")
conf.write(pg_password + "\n")
# DEF
def wait_for_peloton_ready():
peloton_ready = False
while not peloton_ready:
try:
sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sk.settimeout(1)
sk.connect(('127.0.0.1', peloton_port))
peloton_ready = True
sk.close()
except Exception:
pass
if not peloton_ready:
time.sleep(1)
# DEF
def drop_all_tables(conn):
cur = conn.cursor()
cur.execute("SELECT table_schema,table_name FROM information_schema.tables WHERE table_schema = 'public' ORDER BY table_schema,table_name")
rows = cur.fetchall()
for row in rows:
print "dropping table: ", row[1]
cur.execute("drop table " + row[1] + " cascade")
cur.close()
conn.commit()
## DEF
if __name__ == "__main__":
global peloton_path, output_dir, peloton_log_file
work_path = os.path.abspath(".")
output_dir = os.path.abspath(output_dir)
sql_file = os.path.abspath(os.path.join(output_dir, "test.sql"))
config = os.path.abspath(os.path.join(output_dir, "config"))
peloton_log_file = os.path.abspath(os.path.join(output_dir, peloton_log_file))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
gen_config(config)
peloton_log = open(peloton_log_file, "w")
peloton = None
parser = OptionParser()
parser.add_option("--config", dest="config", help="config file path for the testing framework", default=config)
parser.add_option("--out", dest="out", help="Output path for JUnit XML files", default=output_dir)
(options, args) = parser.parse_args()
try:
# Open a connection to postgres so that we can drop the tables
conn = psycopg2.connect(
dbname = pg_database,
user = pg_username,
password = <PASSWORD>,
port = 5433)
# Extract the SQL from the trace files
for path in traces:
print "="*80
print "test: ", path
print "="*80
sys.stdout.flush()
# Always drop the tables in the database first
drop_all_tables(conn)
# Convert the trace file into a format that we can handle
extract_sql(path, sql_file)
# Start peloton
peloton = Popen([peloton_path, "-port", str(peloton_port)],
stdout=peloton_log, stderr=peloton_log)
wait_for_peloton_ready()
# Run the test
os.chdir(peloton_test_path)
test = Popen(["bash", "bin/peloton-test", "-config", options.config, "-trace", sql_file, "-out", options.out, "-batchsize", "100"])
test.wait()
os.chdir(work_path)
peloton.kill()
peloton = None
# FOR
except Exception as e:
raise
finally:
if peloton is not None: peloton.kill()
``` |
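The script star-imports a local config module that is not shown in this file; a hypothetical config.py consistent with the names referenced above might look like the following (every value here is an illustrative guess, not the project's actual settings):
```python
# config.py (hypothetical)
peloton_path = "../../build/bin/peloton"
peloton_port = 15721
peloton_username = "postgres"
peloton_password = "<PASSWORD>"
peloton_test_path = "../peloton-test"
peloton_log_file = "peloton.log"
pg_database = "sqlite_trace"
pg_username = "postgres"
pg_password = "<PASSWORD>"
output_dir = "out"
traces = ["traces/select1.test"]            # SQLite logic-test trace files to replay
kw_filter = ["CREATE INDEX", "VACUUM"]      # statement patterns to skip (matched with re.search)
```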
{
"source": "17zhangw/postgres",
"score": 2
} |
#### File: extensions/hutch/tscout_feature_gen.py
```python
import re
from dataclasses import dataclass
from pathlib import Path
from typing import List, Mapping, Tuple
from clang.cindex import TypeKind
from tscout import model
# We're assuming that this script is housed in `postgres/cmudb/extensions/tscout`.
# We calculate the path of TScout relative to this extension and add it to the PythonPath temporarily.
TSCOUT_EXTENSION_PATH = Path(__file__).parent
CODEGEN_TEMPLATE_PATH = Path.joinpath(TSCOUT_EXTENSION_PATH, "operating_unit_codegen.c")
CODEGEN_FILE_PATH = Path.joinpath(TSCOUT_EXTENSION_PATH, "operating_unit_features.h")
@dataclass
class ExtractionOU:
"""
Represents an Operating Unit whose features are to be extracted from a running instance of PostgreSQL.
ou_index : int
The index of the Operating Unit (OU) in the list of OUs produced by the Model class.
pg_enum_index : int
The index/value of the corresponding enumeration constant in the PostgreSQL source code.
ou_name: str
The name of the Operating Unit.
features: List[Tuple[str, TypeKind, int]]
The list of (feature-name, feature-data-type, padding_field_size) pairs which correspond to the features.
padding_field_size is not None only if the field is a padding field.
"""
ou_index: int
pg_enum_index: int
ou_name: str
features: List[Tuple[str, TypeKind, int]]
OU_TO_FEATURE_LIST_MAP: Mapping[int, ExtractionOU] = {}
# The following OUs do not follow the general naming convention:
# OU Name: ExecABC
# Postgres struct name: T_ABC
# We capture these OUs as exceptions manually.
# TODO (Karthik): Find out when and why this happens.
OU_EXCEPTIONS = {"ExecHashJoinImpl": "T_HashJoin"}
OU_EXCLUDED_FEATURES = [
"query_id",
"left_child_plan_node_id",
"right_child_plan_node_id",
"statement_timestamp",
]
def aggregate_features(ou):
"""
Extract the name, type, and padding field size of every feature for the given OU.
Parameters
----------
ou : model.OperatingUnit
The OU to extract features from.
Returns
-------
features_list : List[Tuple[str, clang.cindex.TypeKind, int]]
The [(name, type, padding_field_size)] of all features for the given OU.
"""
features_list = []
for feature in ou.features_list:
for variable in feature.bpf_tuple:
if variable.pg_type == "List *":
# This is not a long-term solution if we start defining more Reagents.
features_list.append((variable.name, variable.pg_type, variable.padding_field_size))
elif variable.name in OU_EXCLUDED_FEATURES:
continue
else:
features_list.append((variable.name, variable.c_type, variable.padding_field_size))
return features_list
def add_features(features_string, feat_index, ou_xs):
"""
Build up the features string for the given OU.
Parameters
----------
features_string : str
The string containing all the features.
Initialize with the empty string.
feat_index : int
The index of the feature to extract.
ou_xs : List[Tuple[str, clang.cindex.TypeKind, int]]
The (names, types, padding_field_size) of all the features for the given OU.
Returns
-------
new_features_string : str
The new features string for the given OU.
"""
features_string += "\n"
features_struct_list = []
for x in ou_xs:
(name, value, padding_field_size) = x
type_kind = "T_UNKNOWN"
padding_field = padding_field_size if padding_field_size else 0
if value == TypeKind.CONSTANTARRAY and padding_field_size is not None:
# This indicates a padding field.
type_kind = "T_PADDING"
elif value == "List *":
type_kind = "T_LIST_PTR" # This is not a long-term solution if we start defining more Reagents.
elif value == TypeKind.POINTER:
type_kind = "T_PTR"
elif value in [TypeKind.INT, TypeKind.UINT]:
type_kind = "T_INT"
elif value in [TypeKind.LONG, TypeKind.ULONG]:
type_kind = "T_LONG"
elif value == TypeKind.SHORT:
type_kind = "T_SHORT"
elif value == TypeKind.DOUBLE:
type_kind = "T_DOUBLE"
elif value == TypeKind.ENUM:
type_kind = "T_ENUM"
elif value == TypeKind.BOOL:
type_kind = "T_BOOL"
else:
type_kind = str(value)
features_struct_list.append(f'{{ {type_kind}, "{name}", {padding_field} }}')
features_struct = str.join(", ", features_struct_list)
features_string += f"field feat_{feat_index:d}[] = {{ " + features_struct + " };"
return features_string
def fill_in_template(ou_string, ou_index, node_type, ou_xs):
"""
Fill in the codegen template for the given OU.
Parameters
----------
ou_string : str
The current ou_string.
ou_index : int
The index of the OU.
node_type : str
The name of the OU.
ou_xs : List[Tuple[str, clang.cindex.TypeKind, int]]
The (name, type, padding) of all features for the OU.
Returns
-------
new_ou_string : str
The codegen template with the given OU's details substituted.
"""
# Replace the index of the OU.
ou_string = ou_string.replace("OU_INDEX", f"{ou_index:d}")
# Replace the name of the OU.
ou_string = ou_string.replace("OU_NAME", f'"{node_type}"')
# Compute and replace the number of features.
ou_string = ou_string.replace("NUM_Xs", f"{len(ou_xs):d}")
# If there are features, add the list of features.
# Otherwise, replace with a dummy string.
if ou_xs:
ou_string = ou_string.replace("OU_Xs", f"feat_{ou_index}")
else:
ou_string = ou_string.replace("OU_Xs", "feat_none")
return ou_string
def main():
"""
Generate the TScout features and fill in the codegen template.
"""
modeler = model.Model()
# Fetch the NodeTag enum.
pg_mapping = modeler.get_enum_value_map("NodeTag")
for i in range(len(pg_mapping)):
OU_TO_FEATURE_LIST_MAP[i] = {}
# Populate the NodeTag's details.
for (index, ou) in enumerate(modeler.operating_units):
if ou.name().startswith("Exec"):
struct_name = ou.name()[len("Exec") :]
pg_struct_name = "T_" + struct_name
pg_enum_index = None
if pg_struct_name in pg_mapping.keys():
pg_enum_index = pg_mapping[pg_struct_name]
elif ou.name() in OU_EXCEPTIONS:
pg_struct_name = OU_EXCEPTIONS[ou.name()]
pg_enum_index = pg_mapping[pg_struct_name]
if pg_enum_index:
OU_TO_FEATURE_LIST_MAP[pg_enum_index] = ExtractionOU(
index, pg_mapping[pg_struct_name], ou.name(), aggregate_features(ou)
)
# Open and analyse the codegen file.
with open(str(CODEGEN_TEMPLATE_PATH), "r", encoding="utf-8") as template:
text = template.read()
# Find a sequence that matches "(ou){.*},".
matches = re.findall(r"\(ou\){.*},", text)
assert len(matches) == 1
feat_matcher = re.findall(r"// Features go here.", text)
assert len(feat_matcher) == 1
match = matches[0]
feat_match = feat_matcher[0]
ou_struct_list = []
features_list_string = ""
# For each OU, generate the features and fill in the codegen template.
for (key, value) in OU_TO_FEATURE_LIST_MAP.items():
# Initialize with the matching string.
ou_string = match
if value:
ou_xs = value.features
ou_string = fill_in_template(ou_string, key, value.ou_name, ou_xs)
features_list_string = add_features(features_list_string, key, ou_xs)
else:
# Print defaults.
ou_string = fill_in_template(ou_string, -1, "", [])
ou_struct_list.append(ou_string)
ou_struct_list_string = str.join("\n", ou_struct_list)
text = text.replace(match, ou_struct_list_string)
text = text.replace(feat_match, features_list_string)
with open(CODEGEN_FILE_PATH, "w", encoding="utf-8") as gen_file:
gen_file.write(text)
if __name__ == "__main__":
main()
```
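To make the generated output concrete, a small illustrative call to add_features (the feature names below are hypothetical, and TypeKind comes from clang.cindex as used in the module above):
```python
from clang.cindex import TypeKind

snippet = add_features("", 3, [("plan_node_id", TypeKind.INT, None),
                               ("plan_padding", TypeKind.CONSTANTARRAY, 4)])
print(snippet)
# field feat_3[] = { { T_INT, "plan_node_id", 0 }, { T_PADDING, "plan_padding", 4 } };
```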
#### File: cmudb/tscout/clang_parser.py
```python
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List, Mapping, Tuple
import clang.cindex
logger = logging.getLogger("tscout")
# Expected path of this file: "postgres/cmudb/tscout/"
# Path to the Postgres root.
POSTGRES_PATH = Path(__file__).parent.parent.parent
# Path to the Postgres files to parse.
POSTGRES_FILES = (f"{POSTGRES_PATH}/src/backend/executor/execMain.c",)
# The arguments that Clang uses to parse header files.
CLANG_ARGS = [
"-std=c17",
f"-I{POSTGRES_PATH}/src/include",
"-I/usr/lib/gcc/x86_64-linux-gnu/9/include",
"-I/usr/local/include",
"-I/usr/include/x86_64-linux-gnu",
"-I/usr/include",
]
def convert_define_to_arg(input_define):
"""
Convert from a #define to a command line arg.
Parameters
----------
input_define : str
String in the format of "#define variable value".
Returns
-------
output_str : str
String in the format of "-Dvariable=value".
"""
var_and_value = input_define.rstrip()[len("#define ") :]
separator = var_and_value.find(" ")
var = var_and_value[:separator]
value = var_and_value[separator + 1 :]
return f"-D{var}={value}"
# Grab the results of ./configure to make sure that we're passing the same
# preprocessor #defines to libclang as when compiling Postgres.
# #defines can affect struct sizing depending on machine environment.
with open(f"{POSTGRES_PATH}/config.log", encoding="utf-8") as config_file:
for config_line in config_file:
if config_line.startswith("#define "):
CLANG_ARGS.append(convert_define_to_arg(config_line))
@dataclass
class Field:
"""A field of a struct, as parsed by Clang."""
name: str
pg_type: str
canonical_type_kind: clang.cindex.TypeKind
alignment: int = None # Non-None for the first field of a struct, using alignment value of the struct.
field_size: int = None # Size of the field
is_padding: bool = False # Whether this field is a generated padding field.
class ClangParser:
"""
On init, ClangParser parses the PostgreSQL source code to construct a
mapping from struct name to a list of the struct fields, where the fields
list has both base classes expanded and record types expanded.
Attributes
----------
field_map : Mapping[str, List[Field]]
Maps from (struct name) to a (base-class- and record-type-
expanded list of all fields for the struct).
"""
def __init__(self):
indexes: List[clang.cindex.Index] = []
translation_units: List[clang.cindex.TranslationUnit] = []
classes: Mapping[str, clang.cindex.Cursor] = {}
enums: Mapping[str, clang.cindex.Cursor] = {}
# Parse each postgres file's definitions into the classes map.
# classes is a map to handle potential duplicate definitions
# from parsing multiple translation units.
for postgres_file in POSTGRES_FILES:
# Parse the translation unit.
index = clang.cindex.Index.create()
tunit = index.parse(postgres_file, args=CLANG_ARGS)
# Keep the index and translation unit alive for the rest of init.
indexes.append(index)
translation_units.append(tunit)
# Add all relevant definitions to the classes map.
for node in tunit.cursor.get_children():
kind_ok = node.kind in [
clang.cindex.CursorKind.CLASS_DECL,
clang.cindex.CursorKind.STRUCT_DECL,
clang.cindex.CursorKind.UNION_DECL,
]
kind_enum = node.kind in [
clang.cindex.CursorKind.ENUM_DECL,
]
is_new = node.spelling not in classes
# Fix forward declarations clobbering definitions.
is_real_def = node.is_definition()
if kind_ok and is_new and is_real_def:
classes[node.spelling] = node
elif kind_enum and is_new and is_real_def:
enums[node.spelling] = node
# To construct the field map, we will construct the following objects:
# 1. _classes
# Extract a list of all classes in the translation units.
# 2. _bases
# Extract a mapping from class name to all base classes.
# 3. _fields
# Extract a mapping from class name to all the fields,
# but base classes not expanded, record types not expanded.
# 4. _rtti_map
# _fields with base classes expanded.
# 5. field_map
# _fields with base classes expanded and record types expanded.
# 6. enum_map
# Extract a mapping of all enumerations in the code base where the value is a
# list of (enum_name, enum_value) pairs.
# _classes : list of all classes in the translation unit
self._classes: List[clang.cindex.Cursor] = classes.values()
self._classes = sorted(self._classes, key=lambda node: node.spelling)
# _bases : class name -> list of base classes for the class, for C++ inheritance.
self._bases: Mapping[str, List[clang.cindex.Cursor]] = {
node.spelling: [
child.referenced
for child in node.get_children()
if child.kind == clang.cindex.CursorKind.CXX_BASE_SPECIFIER
]
for node in self._classes
}
# _fields : class name -> list of fields in the class
self._fields: Mapping[str, List[Field]] = {
node.spelling: [
Field(
child.displayname,
child.type.spelling
if child.type.get_canonical().kind != clang.cindex.TypeKind.RECORD
else child.type.get_canonical().get_declaration().spelling,
child.type.get_canonical().kind,
field_size=child.type.get_size(),
)
for child in node.get_children()
if child.kind == clang.cindex.CursorKind.FIELD_DECL
]
for node in self._classes
}
# _rtti_map : class name ->
# list of fields in the class with base classes expanded, for C++ inheritance.
self._rtti_map: Mapping[str, List[Field]] = {
node_name: self._construct_base_expanded_fields(node_name) for node_name in self._bases
}
# field_map: class name ->
# list of fields in the class with base classes expanded
# and record types expanded
self.field_map: Mapping[str, List[Field]] = {
node_name: self._construct_fully_expanded_fields(node_name, classes, prefix=f"{node_name}_")
for node_name in self._bases
}
self.enum_map: Mapping[str, List[Tuple[str, int]]] = {
node: [(child.spelling, child.enum_value) for child in enum.get_children()] for node, enum in enums.items()
}
def _construct_base_expanded_fields(self, class_name):
"""
Construct the list of base-class-expanded fields.
Depends on self._fields and self._bases.
Base class fields are prepended.
Parameters
----------
class_name : str
The name of the class to construct a field list for.
Returns
-------
A base-class-expanded list of fields for the input class.
"""
fields, bases = self._fields, self._bases
# If the class has no fields, we are done.
field_list = fields.get(class_name)
if field_list is None:
return []
# Otherwise, if there are any base classes,
# recursively prepend the fields from the base classes,
# and then return the fields for this class.
base_classes = bases[class_name]
for base_class in base_classes:
base = self._construct_base_expanded_fields(base_class.spelling)
field_list = base + field_list
return field_list
def _construct_fully_expanded_fields(self, class_name, classes, prefix=""):
"""
Construct the list of base-class- and record-type- expanded fields.
Depends on self._rtti_map.
Parameters
----------
class_name : str
The name of the class to construct a field list for.
prefix : str
Recursive helper parameter, will be prefixed onto field names.
Returns
-------
A base-class- and record-type- expanded list of fields for the class.
Warnings
--------
Record types are only expanded wherever possible, and are otherwise
dropped after printing a warning.
"""
rtti_map = self._rtti_map
fields = rtti_map[class_name]
new_fields = []
# For every field in the base-class-expanded field list for the class,
for field in fields:
if field.canonical_type_kind != clang.cindex.TypeKind.RECORD:
# If the field is not a record type,
# just append the field to the list of new fields.
new_field = Field(
name=f"{prefix}{field.name}",
pg_type=field.pg_type,
canonical_type_kind=field.canonical_type_kind,
is_padding=field.is_padding,
field_size=field.field_size,
)
new_fields.append(new_field)
else:
# If the field is a record type, try adding the list of
# base-class- and record-type- expanded fields.
# However, this is not always possible,
# e.g., for non-PostgreSQL structs.
if field.pg_type not in rtti_map:
logger.warning("No type info for %s used in %s.", field.pg_type, class_name)
else:
expanded_fields = self._construct_fully_expanded_fields(
field.pg_type, classes, prefix=prefix + f"{field.name}_"
)
new_fields.extend(expanded_fields)
# This is a big band-aid, aimed at temporarily patching the behavior of hutch/TScout when
# it comes to struct alignments. As of this change (Mar 7, 2022), we have that BPF ->
# Python userspace structs don't respect __attribute__((aligned(#))). It doesn't seem like
# ctypes has a way of specifying per-field alignment either...
#
# We can theoretically generate structs for the python collector to read but then Hutch would
# still segfault with the incorrect offsets. The following attempts to compute whether or not
# there will be padding between the last data variable and the end of the struct.
#
# If there is a difference, a manual "padding" field is inserted.
struct_size = classes[field.pg_type].type.get_size()
last_field = rtti_map[field.pg_type][-1]
last_field_offset = int(classes[field.pg_type].type.get_offset(last_field.name) / 8)
last_field_data = last_field.field_size + last_field_offset
if struct_size - last_field_data > 0:
padding = struct_size - last_field_data
new_fields.append(
Field(
name=f"{prefix}{field.name}_padding",
pg_type=None,
canonical_type_kind=clang.cindex.TypeKind.CONSTANTARRAY,
field_size=padding,
is_padding=True,
)
)
new_fields[0].alignment = classes[class_name].type.get_align()
# The alignment value is the struct's alignment, not the field's. We attach it to the first field of the
# struct because a struct and its first field share the same memory address.
return new_fields
```
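A minimal usage sketch for ClangParser (the struct name queried below is an assumption; any struct reachable from the parsed translation units would work, and running this requires the Postgres source tree plus libclang):
```python
from clang_parser import ClangParser

parser = ClangParser()

# Fully expanded (base classes and nested records flattened) fields of a struct:
for field in parser.field_map.get("PlanState", []):
    print(field.name, field.canonical_type_kind, field.field_size)

# Enum constants, e.g. the NodeTag values used elsewhere in TScout:
print(parser.enum_map["NodeTag"][:5])   # first few (name, value) pairs
```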
#### File: cmudb/tscout/tscout.py
```python
import argparse
import logging
import multiprocessing as mp
import os
import sys
from dataclasses import dataclass
import model
import psutil
import setproctitle
from bcc import ( # pylint: disable=no-name-in-module
BPF,
USDT,
PerfHWConfig,
PerfType,
utils,
)
@dataclass
class PostgresInstance:
"""Finds and then stashes the PIDs for a postgres instance designated by the constructor's pid argument."""
def __init__(self, pid):
def cmd_in_cmdline(cmd, proc):
"""
Parameters
----------
cmd: str
proc: psutil.Process
Returns
-------
True if the provided command was in the provided Process' command line args.
"""
return any(cmd in x for x in proc.cmdline())
self.postgres_pid = pid
try:
# Iterate through all the children for the given PID, and extract PIDs for expected background workers.
for child in psutil.Process(self.postgres_pid).children():
if not self.checkpointer_pid and cmd_in_cmdline("checkpointer", child):
self.checkpointer_pid = child.pid
elif not self.bgwriter_pid and cmd_in_cmdline("background", child) and cmd_in_cmdline("writer", child):
self.bgwriter_pid = child.pid
elif not self.walwriter_pid and cmd_in_cmdline("walwriter", child):
self.walwriter_pid = child.pid
elif all(x is not None for x in [self.checkpointer_pid, self.bgwriter_pid, self.walwriter_pid]):
# We found all the children PIDs that we care about, so we're done.
return
except psutil.NoSuchProcess:
logger.error("Provided PID not found.")
sys.exit(1)
if any(x is None for x in [self.checkpointer_pid, self.bgwriter_pid, self.walwriter_pid]):
# TODO(Matt): maybe get fancy with dataclasses.fields() so we don't have to keep adding to this if more
# fields are added to the dataclass?
logger.error("Did not find expected background workers for provided PID.")
sys.exit(1)
postgres_pid: int = None
checkpointer_pid: int = None
bgwriter_pid: int = None
walwriter_pid: int = None
logger = logging.getLogger("tscout")
# Set up the OUs and metrics to be collected.
modeler = model.Model()
operating_units = modeler.operating_units
metrics = modeler.metrics
# OUs may have common structs that cause duplicate struct definitions in
# the collector_c file that is generated, e.g., struct Plan.
# helper_struct_defs is used to avoid duplicate struct definitions by
# accumulating all the struct definitions exactly once, and defining those
# structs at one shot at the start of the generated collector_c file.
HELPER_STRUCT_DEFS = {}
def generate_readargs(feature_list):
"""
Generate bpf_usdt_readargs_p() calls for the given feature list.
This function assumes that the following are in scope:
- struct pt_regs *ctx
- struct SUBST_OU_output *output
Parameters
----------
feature_list : List[model.Feature]
List of BPF features being emitted.
Returns
-------
code : str
bpf_usdt_readarg() and bpf_usdt_readarg_p() invocations.
"""
code = []
non_feature_usdt_args = 1 # Currently just ou_instance. If any other non-feature args are added, increment this.
for idx, feature in enumerate(feature_list, 1):
first_member = feature.bpf_tuple[0].name
if feature.readarg_p:
readarg_p = [
" bpf_usdt_readarg_p(",
f"{idx + non_feature_usdt_args}, ",
"ctx, ",
f"&(features->{first_member}), ",
f"sizeof(struct DECL_{feature.name})",
");\n",
]
code.append("".join(readarg_p))
else:
readarg = [
" bpf_usdt_readarg(",
f"{idx + non_feature_usdt_args}, ",
"ctx, ",
f"&(features->{first_member})",
");\n",
]
code.append("".join(readarg))
return "".join(code)
def generate_reagents(feature_list, reagents_used):
code = []
for feature in feature_list:
for field in feature.bpf_tuple:
if field.pg_type in model.REAGENTS:
reagents_used.add(field.pg_type)
code.append(model.REAGENTS[field.pg_type].produce_one_field(field.name))
return "".join(code)
def generate_markers(operation, ou_index, reagents_used):
# pylint: disable=global-statement
global HELPER_STRUCT_DEFS
# Load the C code for the Markers.
with open("markers.c", "r", encoding="utf-8") as markers_file:
markers_c = markers_file.read()
# Replace OU-specific placeholders in C code.
markers_c = markers_c.replace("SUBST_OU", f"{operation.function}")
markers_c = markers_c.replace("SUBST_READARGS", generate_readargs(operation.features_list))
# TODO(Matt): We're making multiple passes through the features_list. Maybe collapse generate_reagents and
# generate_readargs into one function.
markers_c = markers_c.replace("SUBST_REAGENTS", generate_reagents(operation.features_list, reagents_used))
markers_c = markers_c.replace("SUBST_FEATURES", operation.features_struct())
markers_c = markers_c.replace("SUBST_INDEX", str(ou_index))
markers_c = markers_c.replace("SUBST_FIRST_FEATURE", operation.features_list[0].bpf_tuple[0].name)
# Accumulate struct definitions.
HELPER_STRUCT_DEFS = {**HELPER_STRUCT_DEFS, **operation.helper_structs()}
return markers_c
def collector(collector_flags, ou_processor_queues, pid, socket_fd):
setproctitle.setproctitle(f"{pid} TScout Collector")
# Read the C code for the Collector.
with open("collector.c", "r", encoding="utf-8") as collector_file:
collector_c = collector_file.read()
# Append the C code for the Probes.
with open("probes.c", "r", encoding="utf-8") as probes_file:
collector_c += probes_file.read()
# Append the C code for the Markers. Accumulate the Reagents that we need into a set to use later to add the
# definitions that we need.
reagents_used = set()
for ou_index, ou in enumerate(operating_units):
collector_c += generate_markers(ou, ou_index, reagents_used)
# Process the list of Reagents that we need. Prepend the C code for the Reagent functions, add struct declaration to
# HELPER_STRUCT_DEFS to be prepended later.
for reagent_used in reagents_used:
reagent = model.REAGENTS[reagent_used]
collector_c = reagent.reagent_fn() + "\n" + collector_c
if reagent.type_name not in HELPER_STRUCT_DEFS:
# We may already have a struct definition for this type if it was unrolled in a struct somewhere already.
HELPER_STRUCT_DEFS[reagent.type_name] = model.struct_decl_for_fields(reagent.type_name, reagent.bpf_tuple)
# Prepend the helper struct defs.
collector_c = "\n".join(HELPER_STRUCT_DEFS.values()) + "\n" + collector_c
# Replace placeholders related to metrics.
defs = [f"{model.CLANG_TO_BPF[metric.c_type]} {metric.name}{metric.alignment_string()}" for metric in metrics]
metrics_struct = ";\n".join(defs) + ";"
collector_c = collector_c.replace("SUBST_METRICS", metrics_struct)
accumulate = [
f"lhs->{metric.name} += rhs->{metric.name}"
for metric in metrics
if metric.name not in ("start_time", "end_time", "pid", "cpu_id")
] # don't accumulate these metrics
metrics_accumulate = ";\n".join(accumulate) + ";"
collector_c = collector_c.replace("SUBST_ACCUMULATE", metrics_accumulate)
collector_c = collector_c.replace("SUBST_FIRST_METRIC", metrics[0].name)
num_cpus = len(utils.get_online_cpus())
collector_c = collector_c.replace("MAX_CPUS", str(num_cpus))
# Attach USDT probes to the target PID.
collector_probes = USDT(pid=pid)
for ou in operating_units:
for probe in [ou.features_marker(), ou.begin_marker(), ou.end_marker(), ou.flush_marker()]:
collector_probes.enable_probe(probe=probe, fn_name=probe)
# Load the BPF program, eliding setting the socket fd
# if this pid won't generate network metrics.
cflags = ['-DKBUILD_MODNAME="collector"']
if socket_fd:
cflags.append(f"-DCLIENT_SOCKET_FD={socket_fd}")
collector_bpf = BPF(text=collector_c, usdt_contexts=[collector_probes], cflags=cflags)
# open perf hardware events for BPF program
collector_bpf["cpu_cycles"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.CPU_CYCLES)
collector_bpf["instructions"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.INSTRUCTIONS)
collector_bpf["cache_references"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.CACHE_REFERENCES)
collector_bpf["cache_misses"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.CACHE_MISSES)
collector_bpf["ref_cpu_cycles"].open_perf_event(PerfType.HARDWARE, PerfHWConfig.REF_CPU_CYCLES)
heavy_hitter_ou_index = -1
heavy_hitter_counter = 0
def heavy_hitter_update(ou_index):
# heavy_hitter_update pylint: disable=unused-variable
nonlocal heavy_hitter_counter
nonlocal heavy_hitter_ou_index
if heavy_hitter_counter == 0:
heavy_hitter_ou_index = ou_index
heavy_hitter_counter = 1
else:
if heavy_hitter_ou_index == ou_index:
heavy_hitter_counter = heavy_hitter_counter + 1
else:
heavy_hitter_counter = heavy_hitter_counter - 1
lost_collector_events = 0
def lost_collector_event(num_lost):
nonlocal lost_collector_events
lost_collector_events = lost_collector_events + num_lost
def collector_event_builder(output_buffer):
def collector_event(cpu, data, size):
# pylint: disable=unused-argument
raw_data = collector_bpf[output_buffer].event(data)
operating_unit = operating_units[raw_data.ou_index]
event_features = operating_unit.serialize_features(
raw_data
) # TODO(Matt): consider moving serialization to CSV string to Processor
training_data = "".join(
[event_features, ",", ",".join(metric.serialize(raw_data) for metric in metrics), "\n"]
)
ou_processor_queues[raw_data.ou_index].put(training_data) # TODO(Matt): maybe put_nowait?
# heavy_hitter_update(raw_data.ou_index)
return collector_event
# Open an output buffer for this OU.
for i in range(len(operating_units)):
output_buffer = f"collector_results_{i}"
collector_bpf[output_buffer].open_perf_buffer(
callback=collector_event_builder(output_buffer), lost_cb=lost_collector_event
)
logger.info("Collector attached to PID %s.", pid)
# Poll on the Collector's output buffer until Collector is shut down.
while collector_flags[pid]:
try:
# Use a timeout to periodically check the flag
# since polling the output buffer blocks.
collector_bpf.perf_buffer_poll(1000)
except KeyboardInterrupt:
logger.info("Collector for PID %s caught KeyboardInterrupt.", pid)
except Exception as e: # pylint: disable=broad-except
logger.warning("Collector for PID %s caught %s.", pid, e)
if lost_collector_events > 0:
logger.warning("Collector for PID %s lost %s events.", pid, lost_collector_events)
logger.info("Collector for PID %s shut down.", pid)
def lost_something(num_lost):
# num_lost. pylint: disable=unused-argument
pass
def processor(ou, buffered_strings, outdir, append):
setproctitle.setproctitle(f"TScout Processor {ou.name()}")
file_path = f"{outdir}/{ou.name()}.csv"
file_mode = "w"
if append and os.path.exists(file_path):
file_mode = "a"
elif append:
logger.warning("--append specified but %s does not exist. Creating this file instead.", file_path)
# Open output file, with the name based on the OU.
with open(file_path, mode=file_mode, encoding="utf-8") as file:
if file_mode == "w":
# Write the OU's feature columns for CSV header,
# with an additional separator before resource metrics columns.
file.write(ou.features_columns() + ",")
# Write the resource metrics columns for the CSV header.
file.write(",".join(metric.name for metric in metrics) + "\n")
logger.info("Processor started for %s.", ou.name())
try:
# Write serialized training data points from shared queue to file.
while True:
string = buffered_strings.get()
file.write(string)
except KeyboardInterrupt:
logger.info("Processor for %s caught KeyboardInterrupt.", ou.name())
while True:
# TScout is shutting down.
# Write any remaining training data points.
string = buffered_strings.get()
if string is None:
# Collectors have all shut down, and poison pill
# indicates there are no more training data points.
logger.info("Processor for %s received poison pill.", ou.name())
break
file.write(string)
except Exception as e: # pylint: disable=broad-except
logger.warning("Processor for %s caught %s", ou.name(), e)
finally:
logger.info("Processor for %s shut down.", ou.name())
def main():
parser = argparse.ArgumentParser(description="TScout")
parser.add_argument("pid", type=int, help="Postmaster PID that we're attaching to")
parser.add_argument("--outdir", required=False, default=".", help="Training data output directory")
parser.add_argument(
"--append",
required=False,
default=False,
action="store_true",
help="Append to training data in output directory",
)
args = parser.parse_args()
pid = args.pid
outdir = args.outdir
append = args.append
postgres = PostgresInstance(pid)
setproctitle.setproctitle(f"{postgres.postgres_pid} TScout Coordinator")
# Read the C code for TScout.
with open("tscout.c", "r", encoding="utf-8") as tscout_file:
tscout_c = tscout_file.read()
# Attach USDT probes to the target PID.
tscout_probes = USDT(pid=postgres.postgres_pid)
for probe in ["fork_backend", "fork_background", "reap_backend", "reap_background"]:
tscout_probes.enable_probe(probe=probe, fn_name=probe)
# Load TScout program to monitor the Postmaster.
tscout_bpf = BPF(text=tscout_c, usdt_contexts=[tscout_probes], cflags=['-DKBUILD_MODNAME="tscout"'])
keep_running = True
with mp.Manager() as manager:
# Create coordination data structures for Collectors and Processors
collector_flags = manager.dict()
collector_processes = {}
ou_processor_queues = []
ou_processors = []
# Create a Processor for each OU
for ou in operating_units:
# TODO(Matt): maybe bound this queue size?
# may not work reliably with a poison pill for shutdown
ou_processor_queue = mp.Queue()
ou_processor_queues.append(ou_processor_queue)
ou_processor = mp.Process(
target=processor,
args=(ou, ou_processor_queue, outdir, append),
)
ou_processor.start()
ou_processors.append(ou_processor)
def create_collector(child_pid, socket_fd=None):
logger.info("Postmaster forked PID %s, creating its Collector.", child_pid)
collector_flags[child_pid] = True
collector_process = mp.Process(
target=collector, args=(collector_flags, ou_processor_queues, child_pid, socket_fd)
)
collector_process.start()
collector_processes[child_pid] = collector_process
def destroy_collector(collector_process, child_pid):
logger.info("Postmaster reaped PID %s, destroying its Collector.", child_pid)
collector_flags[child_pid] = False
collector_process.join()
del collector_flags[child_pid]
del collector_processes[child_pid]
def postmaster_event(cpu, data, size):
# cpu, size. pylint: disable=unused-argument
output_event = tscout_bpf["postmaster_events"].event(data)
event_type = output_event.type_
child_pid = output_event.pid_
if event_type in [0, 1]:
fd = output_event.socket_fd_ if event_type == 0 else None
create_collector(child_pid, fd)
elif event_type in [2, 3]:
collector_process = collector_processes.get(child_pid)
if collector_process:
destroy_collector(collector_process, child_pid)
else:
logger.error("Unknown event type from Postmaster.")
raise KeyboardInterrupt
tscout_bpf["postmaster_events"].open_perf_buffer(callback=postmaster_event, lost_cb=lost_something)
print(f"TScout attached to PID {postgres.postgres_pid}.")
# Poll on TScout's output buffer until TScout is shut down.
while keep_running:
try:
tscout_bpf.perf_buffer_poll()
except KeyboardInterrupt:
keep_running = False
except Exception as e: # pylint: disable=broad-except
logger.warning("TScout caught %s.", e)
print("TScout shutting down.")
# Shut down the Collectors so that
# no more data is generated for the Processors.
for pid, process in collector_processes.items():
collector_flags[pid] = False
process.join()
logger.info("Joined Collector for PID %s.", pid)
print("TScout joined all Collectors.")
# Shut down the Processor queues so that
# everything gets flushed to the Processors.
for ou_processor_queue in ou_processor_queues:
ou_processor_queue.put(None)
ou_processor_queue.close()
ou_processor_queue.join_thread()
print("TScout joined all Processor queues.")
# Shut down the Processors once the Processors are done
# writing any remaining data to disk.
for ou_processor in ou_processors:
ou_processor.join()
print("TScout joined all Processors.")
print(f"TScout for PID {postgres.postgres_pid} shut down.")
# We're done.
sys.exit()
if __name__ == "__main__":
main()
``` |
{
"source": "17zhangw/S22-15799-p1",
"score": 2
} |
#### File: behavior/datagen/generate_workloads.py
```python
from __future__ import annotations
import logging
import shutil
from pathlib import Path
import yaml
from plumbum import cli
from behavior import BENCHDB_TO_TABLES
from evaluation.utils import inject_param_xml, param_sweep_space, parameter_sweep
logger = logging.getLogger(__name__)
def datagen_sweep_callback(parameters, closure):
"""
Callback for the datagen parameter sweep.
Given the current set of parameters in the sweep, this callback generates
the corresponding workload configuration. Workload formats are described in more
detail in behavior/datagen/run_workloads.sh.
Parameters:
-----------
parameters: List[Tuple[List[str], Any]]
The parameter combination.
closure : Dict[str, Any]
Closure environment passed from caller.
"""
mode_dir = closure["mode_dir"]
benchmark = closure["benchmark"]
benchbase_config_path = closure["benchbase_config_path"]
postgresql_config_file = closure["postgresql_config_file"]
pg_analyze = closure["pg_analyze"]
pg_prewarm = closure["pg_prewarm"]
# The suffix is a concatenation of parameter names and their values.
param_suffix = "_".join([name_level[-1] + "_" + str(value) for name_level, value in parameters])
results_dir = Path(mode_dir / (benchmark + "_" + param_suffix))
results_dir.mkdir(exist_ok=True)
print(f"Creating workload configuration: {results_dir}")
# Copy and inject the XML file of BenchBase.
benchbase_config_file = Path(results_dir / "benchbase_config.xml")
shutil.copy(benchbase_config_path, benchbase_config_file)
inject_param_xml(benchbase_config_file.as_posix(), parameters)
benchbase_configs = [str(benchbase_config_file.resolve())]
# Copy the default postgresql.conf file.
# TODO(wz2): Rewrite the postgresql.conf based on knob tweaks and modify the param_suffix above.
benchbase_postgresql_config_file = Path(results_dir / "postgresql.conf")
shutil.copy(postgresql_config_file, benchbase_postgresql_config_file)
pg_configs = [str(benchbase_postgresql_config_file.resolve())]
# Create the config.yaml file
config = {
"benchmark": benchmark,
"pg_analyze": pg_analyze,
"pg_prewarm": pg_prewarm,
"pg_configs": pg_configs,
"benchbase_configs": benchbase_configs,
}
with (results_dir / "config.yaml").open("w") as f:
yaml.dump(config, f)
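# For reference, the config.yaml written above looks roughly like this
# (benchmark name and paths are illustrative, and key order may differ):
#   benchbase_configs:
#   - /abs/path/to/benchbase_config.xml
#   benchmark: tpcc
#   pg_analyze: true
#   pg_configs:
#   - /abs/path/to/postgresql.conf
#   pg_prewarm: true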
class GenerateWorkloadsCLI(cli.Application):
config_file = cli.SwitchAttr(
"--config-file",
Path,
mandatory=True,
help="Path to configuration YAML containing datagen parameters.",
)
postgresql_config_file = cli.SwitchAttr(
"--postgresql-config-file",
Path,
mandatory=True,
help="Path to standard postgresql.conf that the workloads should execute with.",
)
dir_benchbase_config = cli.SwitchAttr(
"--dir-benchbase-config",
Path,
mandatory=True,
help="Path to BenchBase config files.",
)
dir_output = cli.SwitchAttr(
"--dir-output",
Path,
mandatory=True,
help="Directory to write generated data to.",
)
def main(self):
config_path = Path(self.config_file)
with config_path.open("r", encoding="utf-8") as f:
self.config = yaml.load(f, Loader=yaml.FullLoader)["datagen"]
logger.setLevel(self.config["log_level"])
# Validate the chosen benchmarks from the config.
benchmarks = self.config["benchmarks"]
for benchmark in benchmarks:
if benchmark not in BENCHDB_TO_TABLES:
raise ValueError(f"Invalid benchmark: {benchmark}")
self.dir_output.mkdir(parents=True, exist_ok=True)
modes = ["train", "eval"]
for mode in modes:
mode_dir = Path(self.dir_output) / mode
Path(mode_dir).mkdir(parents=True, exist_ok=True)
# Build sweeping space
ps_space = param_sweep_space(self.config["param_sweep"])
# For each benchmark, ...
for benchmark in benchmarks:
benchbase_config_path = self.dir_benchbase_config / f"{benchmark}_config.xml"
# Generate OU training data for every parameter combination.
closure = {
"mode_dir": mode_dir,
"benchmark": benchmark,
"benchbase_config_path": benchbase_config_path,
"postgresql_config_file": self.postgresql_config_file,
"pg_prewarm": self.config["pg_prewarm"],
"pg_analyze": self.config["pg_analyze"],
}
parameter_sweep(ps_space, datagen_sweep_callback, closure)
if __name__ == "__main__":
GenerateWorkloadsCLI.run()
```
#### File: S22-15799-p1/dodos/project1.py
```python
from plumbum import cmd
from plumbum.cmd import grep, awk
import doit
from dodos import VERBOSITY_DEFAULT
DEFAULT_DB = "project1db"
DEFAULT_USER = "project1user"
DEFAULT_PASS = "<PASSWORD>"
# Note that pgreplay requires the following configuration:
#
# log_min_messages=error (or more)
# log_min_error_statement=log (or more)
# log_connections=on
# log_disconnections=on
# log_line_prefix='%m|%u|%d|%c|' (if you don't use CSV logging)
# log_statement='all'
# lc_messages must be set to English (encoding does not matter)
# bytea_output=escape (from version 9.0 on, only if you want to replay the log on 8.4 or earlier)
#
# Additionally, doit has a bit of an anti-feature with command substitution,
# so you have to escape %'s by Python %-formatting rules (no way to disable this behavior).
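# A minimal illustration of that escaping rule (hypothetical command, not one of
# the actions below): a shell action that needs a literal %, such as
#   date +"%Y-%m-%d"
# must be written inside the action string as
#   date +"%%Y-%%m-%%d"
# because doit runs the string through Python %-formatting first.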
def task_project1_setup():
import math
import os
mem_limit = math.ceil(float(awk['/MemTotal/ { printf \"%.3f\", $2/1024/1024 }', "/proc/meminfo"]()))
num_cpu = int(grep["-c", "^processor", "/proc/cpuinfo"]())
echo_list = [
"max_connections=100",
f"shared_buffers={int(mem_limit/4)}GB",
f"effective_cache_size={int(mem_limit*3/4)}GB",
f"maintenance_work_mem={int(1024*mem_limit/16)}MB",
"min_wal_size=1GB",
"max_wal_size=4GB",
"checkpoint_completion_target=0.9",
"wal_buffers=16MB",
"default_statistics_target=100",
"random_page_cost=1.1",
"effective_io_concurrency=100",
f"max_parallel_workers={num_cpu}",
f"max_worker_processes={num_cpu}",
f"max_parallel_workers_per_gather=2",
f"max_parallel_maintenance_workers=2",
f"work_mem={int(1024*1024*(mem_limit*3/4)/(300)/4)}kB",
"shared_preload_libraries='pg_stat_statements,pg_qualstats'",
"pg_qualstats.track_constants=OFF",
"pg_qualstats.sample_rate=1",
"pg_qualstats.resolve_oids=True",
"compute_query_id=ON",
]
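# Worked example (hypothetical machine with 16 GB RAM and 8 cores): mem_limit = 16
# and num_cpu = 8, so the formulas above yield shared_buffers=4GB,
# effective_cache_size=12GB, maintenance_work_mem=1024MB, work_mem=10485kB,
# max_parallel_workers=8, and max_worker_processes=8.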
echos = "\n".join(echo_list)
return {
"actions": [
# Install dependencies
lambda: cmd.sudo["apt-get"]["install", "-y", "postgresql-14-hypopg"].run_fg(),
lambda: cmd.sudo["apt-get"]["install", "-y", "postgresql-14-pg-qualstats"].run_fg(),
lambda: cmd.sudo["apt-get"]["install", "-y", "libpq-dev"].run_fg(),
"git submodule update --init --recursive",
"pip install pandas",
"pip install psycopg2",
"pip install pglast",
lambda: os.chdir("behavior/modeling/featurewiz"),
"pip install -r requirements.txt",
lambda: os.chdir(doit.get_initial_workdir()),
"pip install -r requirements.txt",
# Open the models
"mkdir -p artifacts",
"cp models.tgz artifacts/",
lambda: os.chdir("artifacts"),
"tar zxf models.tgz",
lambda: os.chdir(doit.get_initial_workdir()),
"rm -rf blocklist.txt",
"rm -rf pending.txt",
lambda: cmd.sudo["bash"]["-c", f"echo \"{echos}\" >> /etc/postgresql/14/main/postgresql.conf"].run_fg(),
lambda: cmd.sudo["systemctl"]["restart", "postgresql"].run_fg(),
"until pg_isready ; do sleep 1 ; done",
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_project1():
NUM_GIFTED_CHILD = 5
NUM_CANDIDATES = 5
NUM_SUBMIT = 2
NUM_PENDING = 20
def construct_index_name(base_tbl, index_cols, include_cols):
return "idx_" + base_tbl + "_keys_" + ("$".join(index_cols)) + "_inc_" + ("$".join(include_cols))
def reverse_index_sql(index_name):
component0 = index_name.split("idx_")[1]
component1 = component0.split("_keys_")
component2 = component1[1].split("_inc_")
tbl_name = component1[0]
index_cols = component2[0].replace("$", ",")
include_cols = component2[1].replace("$", ",")
if len(include_cols) == 0:
return f"CREATE INDEX \"{index_name}\" ON {tbl_name}({index_cols});"
else:
return f"CREATE INDEX \"{index_name}\" ON {tbl_name}({index_cols}) INCLUDE ({include_cols});"
def execute_query(connection, query, output_dict=False, key=None):
from psycopg2.extras import RealDictCursor
if output_dict:
records = {}
else:
records = []
with connection.cursor(cursor_factory=RealDictCursor) as cursor:
cursor.execute(query)
for record in cursor:
if output_dict:
records[record[key]] = record
else:
records.append(record)
return records
def fetch_useless_indexes(connection):
unused_query = """
SELECT s.indexrelname AS indexname
FROM pg_catalog.pg_stat_user_indexes s
JOIN pg_catalog.pg_index i ON s.indexrelid = i.indexrelid
WHERE s.idx_scan = 0 -- has never been scanned
AND 0 <>ALL (i.indkey) -- no index column is an expression
AND NOT i.indisexclusion -- is not an EXCLUSION
AND NOT i.indisunique -- is not a UNIQUE index
AND NOT EXISTS -- does not enforce a constraint
(SELECT 1 FROM pg_catalog.pg_constraint c WHERE c.conindid = s.indexrelid)
ORDER BY pg_relation_size(s.indexrelid) DESC;
"""
return [record['indexname'] for record in execute_query(connection, unused_query)]
def compute_existing_indexes(connection):
existing_indexes_sql_query = """
SELECT statidx.relid,
statidx.indexrelid,
statidx.relname,
statidx.indexrelname,
idx.indnatts,
idx.indnkeyatts,
STRING_AGG(att.attname, ',') as columns
FROM pg_stat_user_indexes statidx,
pg_index idx,
pg_attribute att
WHERE idx.indexrelid = statidx.indexrelid
AND att.attrelid = statidx.relid
AND att.attnum = ANY(idx.indkey)
AND array_position(idx.indkey, att.attnum) < idx.indnkeyatts
AND att.atttypid != 0
AND idx.indisunique = False
AND idx.indisprimary = False
AND idx.indisexclusion = False
AND NOT EXISTS (SELECT 1 FROM pg_catalog.pg_constraint c WHERE c.conindid = statidx.indexrelid)
GROUP BY statidx.relid,
statidx.indexrelid,
statidx.relname,
statidx.indexrelname,
idx.indisunique,
idx.indnatts,
idx.indnkeyatts
"""
existing_indexes_include_sql_query = """
SELECT statidx.relid,
statidx.indexrelid,
statidx.relname,
statidx.indexrelname,
idx.indnatts,
idx.indnkeyatts,
STRING_AGG(att.attname, ',') as columns
FROM pg_stat_user_indexes statidx,
pg_index idx,
pg_attribute att
WHERE idx.indexrelid = statidx.indexrelid
AND att.attrelid = statidx.relid
AND att.attnum = ANY(idx.indkey)
AND array_position(idx.indkey, att.attnum) >= idx.indnkeyatts
AND att.atttypid != 0
AND idx.indisunique = False
AND idx.indisprimary = False
AND idx.indisexclusion = False
AND NOT EXISTS (SELECT 1 FROM pg_catalog.pg_constraint c WHERE c.conindid = statidx.indexrelid)
GROUP BY statidx.relid,
statidx.indexrelid,
statidx.relname,
statidx.indexrelname,
idx.indisunique,
idx.indnatts,
idx.indnkeyatts
"""
existing_indexes = execute_query(connection, existing_indexes_sql_query)
existing_indexes_include = execute_query(connection, existing_indexes_include_sql_query, True, 'indexrelname')
indexes = {}
for record in existing_indexes:
index_name = record['indexrelname']
include_columns = []
if index_name in existing_indexes_include:
include_columns = existing_indexes_include[index_name]['columns'].split(',')
index_columns = record['columns'].split(',')
if len(index_columns) != record['indnkeyatts']:
# malformed index entry
continue
if len(index_columns) + len(include_columns) - len(set(index_columns).intersection(set(include_columns))) != record['indnatts']:
# there might be a duplicate or it's malformed
continue
marked_name = construct_index_name(record["relname"], index_columns, include_columns)
indexes[marked_name] = {
'relname': record["relname"],
'indexrelname': index_name,
'index_columns': index_columns,
'include_columns': include_columns
}
return indexes
def get_indexadvisor(connection):
sql_query = "SELECT v FROM json_array_elements(pg_qualstats_index_advisor(min_filter=>0, min_selectivity=>0)->'indexes') v ORDER BY v::text COLLATE \"C\";"
indexes = {}
with connection.cursor() as cursor:
cursor.execute(sql_query)
for record in cursor:
components = record[0].split(' ON ')
table = components[1].split(' ')[0]
if '.' in table:
table = table.split('.')[1]
fields_start = record[0].split('(')[1]
fields_str = fields_start.split(')')[0]
fields_str = fields_str.replace(' ', '')
fields = fields_str.split(',')
if len(fields) == 0:
continue
indexrelname = construct_index_name(table, fields, [])
indexes[indexrelname] = {
'relname': table,
'indexrelname': indexrelname,
'index_columns': fields,
'include_columns': []
}
return indexes
def mutate(connection, existing_indexes):
import random
tables = {}
sql_query = """
SELECT tbl.relid, tbl.relname, STRING_AGG(att.attname, ',') as columns
FROM pg_stat_user_tables tbl, pg_attribute att
WHERE tbl.relid = att.attrelid AND NOT att.attisdropped AND att.attnum > 0
GROUP BY tbl.relid, tbl.relname
"""
results = execute_query(connection, sql_query)
for record in results:
tables[record['relname']] = set(record['columns'].split(','))
candidates = {}
for (index, value) in existing_indexes.items():
rel = value['relname']
if rel not in tables:
continue
index_columns = value['index_columns']
include_columns = value['include_columns']
valid_new_index_columns = tables[rel] - set(index_columns) - set(include_columns)
if len(valid_new_index_columns) == 0:
continue
new_index = random.choice(tuple(valid_new_index_columns))
new_include = random.choice(tuple(valid_new_index_columns))
new_index_name = construct_index_name(rel, index_columns + [new_index], include_columns)
new_include_name = construct_index_name(rel, index_columns, include_columns + [new_include])
candidates[new_index_name] = {
'relname': rel,
'indexrelname': new_index_name,
'index_columns': index_columns + [new_index],
'include_columns': include_columns
}
candidates[new_include_name] = {
'relname': rel,
'indexrelname': new_include_name,
'index_columns': index_columns,
'include_columns': include_columns + [new_include]
}
for i in range(NUM_GIFTED_CHILD):
# This is the "gifted child phase of this process".
table_set = [key for (key, _) in tables.items()]
random_table = random.choice(tuple(table_set))
random_column = random.choice(tuple(tables[random_table]))
name = construct_index_name(random_table, [random_column], [])
candidates[name] = {
'relname': random_table,
'indexrelname': name,
'index_columns': [random_column],
'include_columns': []
}
return candidates
def eliminate_candidates(candidates, blocklist, pending):
import random
# Remove any candidates that have been blocked already.
remove_list = []
for candidate in candidates:
if candidate in blocklist:
remove_list.append(candidate)
elif candidate in pending:
remove_list.append(candidate)
[candidates.pop(remove) for remove in remove_list]
# Append candidates and shuffle the list
candidate_keys = [key for (key, value) in candidates.items()]
candidate_list = list(pending.union(candidate_keys))
random.shuffle(candidate_list)
# At-most only evaluate X many candidates with models.
return candidate_list[0:NUM_CANDIDATES], candidate_list[NUM_CANDIDATES:]
def evaluate(connection, workloads, candidates):
import pickle
from pathlib import Path
from operator import itemgetter
import numpy as np
model_dict = {}
for ou_type_path in Path("artifacts/gbm/").glob("*"):
ou_type = ou_type_path.name
model_path = list(ou_type_path.glob("*.pkl"))
assert len(model_path) == 1
model_path = model_path[0]
with open(model_path, "rb") as model_file:
model = pickle.load(model_file)
model_dict[ou_type] = model
workload_features = {}
candidate_costs = {}
with connection.cursor() as cursor:
for candidate in candidates:
candidate_cost = 0
sql = reverse_index_sql(candidate)
try:
cursor.execute(f"SELECT * FROM hypopg_create_index('{sql}')")
for workload in workloads:
cursor.execute(f"EXPLAIN (FORMAT JSON) {workload}")
for record in cursor:
json = record[0]
try:
def diff(plan):
if 'Plans' not in plan:
return
for plan_obj in plan['Plans']:
if plan_obj['Startup Cost'] >= plan['Startup Cost']:
plan['Startup Cost'] = 0.00001
else:
plan['Startup Cost'] -= plan_obj['Startup Cost']
if plan_obj['Total Cost'] >= plan['Total Cost']:
plan['Total Cost'] = 0.00001
else:
plan['Total Cost'] -= plan_obj['Total Cost']
diff(plan_obj)
def accumulate(plan):
cost = 0
if 'Plans' in plan:
for plan_obj in plan['Plans']:
cost = cost + accumulate(plan_obj)
ou_type = plan['Node Type'].replace(' ', '')
if ou_type == "NestedLoop":
ou_type = "NestLoop"
if ou_type == "Aggregate":
ou_type = "Agg"
rows = float(plan['Plan Rows']) if plan['Plan Rows'] > 0 else 0.00001
width = float(plan['Plan Width']) if plan['Plan Width'] > 0 else 0.00001
startup = plan['Startup Cost'] if plan['Startup Cost'] > 0 else 0.00001
total = plan['Total Cost'] if plan['Total Cost'] > 0 else 0.00001
x = np.asarray([rows, width, startup, total]).reshape(1, -1)
try:
cost = cost + model_dict[ou_type].predict(x)[0][-1]
except Exception as e:
# Case where the OU model does not exist. In this case, penalize.
cost = cost + 5 # penalty of 5
print(e)
return cost
root = json[0]['Plan']
diff(root)
candidate_cost = candidate_cost + accumulate(root) * workloads[workload]
except Exception as e:
# It's possible that we don't actually have the OU model, sadly.
print(e)
pass
except Exception as e:
print(e)
pass
cursor.execute(f"SELECT hypopg_reset()")
candidate_costs[candidate] = candidate_cost
result = dict(sorted(candidate_costs.items(), key=itemgetter(1))[0:NUM_SUBMIT])
for k, v in result.items():
candidate_costs.pop(k)
return [k for (k, _) in result.items()], [k for (k, _) in candidate_costs.items()]
def derive_actions(workload_csv, timeout):
import psycopg2
blocklist = set()
try:
with open("blocklist.txt", "r") as f:
lines = f.read().splitlines()
blocklist.update(lines)
except Exception as e:
print(e)
pass
pending = set()
try:
with open("pending.txt", "r") as f:
lines = f.read().splitlines()
pending.update(lines)
except Exception as e:
print(e)
pass
actions = []
workloads = process_workload(workload_csv)
with psycopg2.connect("host=localhost dbname=project1db user=project1user password=<PASSWORD>") as connection:
# Turn on auto-commit.
connection.set_session(autocommit=True)
connection.cursor().execute("CREATE EXTENSION IF NOT EXISTS hypopg")
connection.cursor().execute("CREATE EXTENSION IF NOT EXISTS pg_qualstats")
# Get set of useless indexes.
useless_indexes = fetch_useless_indexes(connection)
# Compute the existing set of indexes.
existing_indexes = compute_existing_indexes(connection)
# Always drop any index that is in useless_indexes
remove_list = []
orig_names = {value['indexrelname']: key for (key, value) in existing_indexes.items()}
for index in useless_indexes:
if index in orig_names:
remove_list.append(orig_names[index])
blocklist.add(index)
actions.append(f"DROP INDEX IF EXISTS {index};")
[existing_indexes.pop(index) for index in remove_list]
# Get candidate indexes suggested by pg_qualstats' index advisor.
advised_indexes = get_indexadvisor(connection)
# let the mutations of the future decide the true course...
candidates = mutate(connection, existing_indexes)
for (k, v) in advised_indexes.items():
candidates[k] = v
# eliminate candidates based on pending and blocklist
candidates, pending = eliminate_candidates(candidates, blocklist, pending)
# evaluate hypothetical index performances
candidates, unselected = evaluate(connection, workloads, candidates)
pending.extend(unselected)
actions.append("SELECT hypopg_reset();")
for candidate in candidates:
actions.append(reverse_index_sql(candidate))
actions.append("SELECT pg_qualstats_reset();")
with open("actions.sql", "w") as f:
for action in actions:
f.write(action)
f.write("\n")
with open("blocklist.txt", "w") as f:
for item in blocklist:
f.write(item + "\n")
with open("pending.txt", "w") as f:
items = pending[0:NUM_PENDING]
for item in items:
f.write(item + "\n")
print("Done generating actions for round")
def process_workload(workload_csv):
import pandas as pd
import pglast
data = pd.read_csv(workload_csv, header=None)
filters = [
"statement: begin",
"statement: alter system set",
"statement: set",
"statement: rollback",
"statement: commit",
"create extension",
"hypopg",
"pg_catalog",
"pg_qualstats",
"pg_stat",
"current_schema",
"version",
"show",
]
selection_vector = data[13].str.contains("statement: ")
selection_vector = selection_vector & (~(data[13] == "statement: "))
for kw in filters:
selection_vector = selection_vector & (~data[13].str.lower().str.contains(kw))
data = data.loc[selection_vector][13]
data = data.str.replace("statement: ", "")
data.reset_index(inplace=True, drop=True)
fingerprint = []
queries = []
import psycopg2
with psycopg2.connect("host=localhost dbname=project1db user=project1user password=<PASSWORD>") as connection:
connection.set_session(autocommit=True)
connection.cursor().execute("CREATE EXTENSION IF NOT EXISTS pg_qualstats")
for query in data:
try:
connection.cursor().execute(query)
except Exception as e:
# At this point, we can't deal with the exception. Sigh.
# But postgres also doesn't give us enough info so SIGH.
print(e)
pass
queries.append(query)
fingerprint.append(pglast.parser.fingerprint(query))
data = pd.DataFrame.from_records(zip(list(data), fingerprint), columns=["statement", "fingerprint"])
def apply_func(df):
row = df.head(1)
row["frequency"] = df.shape[0]
return row
data = data.groupby("fingerprint").apply(apply_func)
data.sort_values(by=["frequency"], ascending=False, inplace=True)
data.reset_index(drop=True, inplace=True)
output = {}
for rec in data.itertuples():
output[rec[1]] = rec[3]
return output
return {
"actions": [derive_actions],
"uptodate": [False],
"verbosity": 2,
"params": [
{
"name": "workload_csv",
"long": "workload_csv",
"help": "The PostgreSQL workload to optimize for.",
"default": None,
},
{
"name": "timeout",
"long": "timeout",
"help": "The time allowed for execution before this dodo task will be killed.",
"default": None,
},
],
}
def task_project1_enable_logging():
"""
Project1: enable logging. (will cause a restart)
"""
sql_list = [
"ALTER SYSTEM SET log_destination='csvlog'",
"ALTER SYSTEM SET logging_collector='on'",
"ALTER SYSTEM SET log_statement='all'",
# For pgreplay.
"ALTER SYSTEM SET log_connections='on'",
"ALTER SYSTEM SET log_disconnections='on'",
]
return {
"actions": [
*[
f'PGPASSWORD={DEFAULT_PASS} psql --host=localhost --dbname={DEFAULT_DB} --username={DEFAULT_USER} --command="{sql}"'
for sql in sql_list
],
lambda: cmd.sudo["systemctl"]["restart", "postgresql"].run_fg(),
"until pg_isready ; do sleep 1 ; done",
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_project1_disable_logging():
"""
Project1: disable logging. (will cause a restart)
This function will reset to the default parameters on PostgreSQL 14,
which is not necessarily the right thing to do -- for example, if you
had custom settings before enable/disable, those custom settings
will not be restored.
"""
sql_list = [
"ALTER SYSTEM SET log_destination='stderr'",
"ALTER SYSTEM SET logging_collector='off'",
"ALTER SYSTEM SET log_statement='none'",
# For pgreplay.
"ALTER SYSTEM SET log_connections='off'",
"ALTER SYSTEM SET log_disconnections='off'",
]
return {
"actions": [
*[
f'PGPASSWORD={DEFAULT_PASS} psql --host=localhost --dbname={DEFAULT_DB} --username={DEFAULT_USER} --command="{sql}"'
for sql in sql_list
],
lambda: cmd.sudo["systemctl"]["restart", "postgresql"].run_fg(),
"until pg_isready ; do sleep 1 ; done",
],
"verbosity": VERBOSITY_DEFAULT,
}
def task_project1_reset_db():
"""
Project1: drop (if exists) and create project1db.
"""
return {
"actions": [
# Drop the project database if it exists.
f"PGPASSWORD={DEFAULT_PASS} dropdb --host=localhost --username={DEFAULT_USER} --if-exists {DEFAULT_DB}",
# Create the project database.
f"PGPASSWORD={<PASSWORD>} createdb --host=localhost --username={DEFAULT_USER} {DEFAULT_DB}",
"until pg_isready ; do sleep 1 ; done",
],
"verbosity": VERBOSITY_DEFAULT,
}
``` |
{
"source": "18000548d/bigchaindb-driver",
"score": 2
} |
#### File: bigchaindb-driver/tests/test_utils.py
```python
from pytest import mark
@mark.parametrize('node,normalized_node', (
(None, ({'endpoint': 'http://localhost:9984', 'headers': {}},)),
('localhost', ({'endpoint': 'http://localhost:9984', 'headers': {}},)),
('http://localhost',
({'endpoint': 'http://localhost:9984', 'headers': {}},)),
('http://localhost:80',
({'endpoint': 'http://localhost:80', 'headers': {}},)),
('https://node.xyz',
({'endpoint': 'https://node.xyz:443', 'headers': {}},)),
('https://node.xyz/path',
({'endpoint': 'https://node.xyz:443/path', 'headers': {}},)),
))
def test_single_node_normalization(node, normalized_node):
from bigchaindb_driver.utils import normalize_nodes, normalize_url
assert normalize_nodes(normalize_url(node)) == normalized_node
@mark.parametrize('nodes,normalized_nodes', (
((), ({'endpoint': 'http://localhost:9984', 'headers': {}},)),
([], ({'endpoint': 'http://localhost:9984', 'headers': {}},)),
(('localhost',
'https://node.xyz'),
({'endpoint': 'http://localhost:9984',
'headers': {}},
{'endpoint': 'https://node.xyz:443',
'headers': {}})),
))
def test_iterable_of_nodes_normalization(nodes, normalized_nodes):
from bigchaindb_driver.utils import normalize_nodes
assert normalize_nodes(*nodes) == normalized_nodes
``` |
{
"source": "18001178267/Image_augment",
"score": 2
} |
#### File: test/augmenters/test_meta.py
```python
from __future__ import print_function, division, absolute_import
import os
import warnings
import sys
import itertools
import copy
from abc import ABCMeta, abstractmethod
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import six
import six.moves as sm
import cv2
import PIL.Image
import imageio
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug import random as iarandom
from imgaug.testutils import (create_random_images, create_random_keypoints,
array_equal_lists, keypoints_equal, reseed,
assert_cbaois_equal,
runtest_pickleable_uint8_img,
TemporaryDirectory)
from imgaug.augmentables.heatmaps import HeatmapsOnImage
from imgaug.augmentables.segmaps import SegmentationMapsOnImage
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from imgaug.augmentables.polys import _ConcavePolygonRecoverer
from imgaug.augmentables.batches import _BatchInAugmentation
IS_PY36_OR_HIGHER = (sys.version_info[0] == 3 and sys.version_info[1] >= 6)
class _InplaceDummyAugmenterImgsArray(iaa.meta.Augmenter):
def __init__(self, addval):
super(_InplaceDummyAugmenterImgsArray, self).__init__()
self.addval = addval
def _augment_batch_(self, batch, random_state, parents, hooks):
batch.images += self.addval
return batch
def get_parameters(self):
return []
class _InplaceDummyAugmenterImgsList(iaa.meta.Augmenter):
def __init__(self, addval):
super(_InplaceDummyAugmenterImgsList, self).__init__()
self.addval = addval
def _augment_batch_(self, batch, random_state, parents, hooks):
assert len(batch.images) > 0
for i in range(len(batch.images)):
batch.images[i] += self.addval
return batch
def get_parameters(self):
return []
class _InplaceDummyAugmenterSegMaps(iaa.meta.Augmenter):
def __init__(self, addval):
super(_InplaceDummyAugmenterSegMaps, self).__init__()
self.addval = addval
def _augment_batch_(self, batch, random_state, parents, hooks):
assert len(batch.segmentation_maps) > 0
for i in range(len(batch.segmentation_maps)):
batch.segmentation_maps[i].arr += self.addval
return batch
def get_parameters(self):
return []
class _InplaceDummyAugmenterKeypoints(iaa.meta.Augmenter):
def __init__(self, x, y):
super(_InplaceDummyAugmenterKeypoints, self).__init__()
self.x = x
self.y = y
def _augment_batch_(self, batch, random_state, parents, hooks):
assert len(batch.keypoints) > 0
for i in range(len(batch.keypoints)):
kpsoi = batch.keypoints[i]
for j in range(len(kpsoi)):
batch.keypoints[i].keypoints[j].x += self.x
batch.keypoints[i].keypoints[j].y += self.y
return batch
def get_parameters(self):
return []
class TestIdentity(unittest.TestCase):
def setUp(self):
reseed()
def test_images(self):
aug = iaa.Identity()
images = create_random_images((16, 70, 50, 3))
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_images_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
images = create_random_images((16, 70, 50, 3))
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_heatmaps(self):
aug = iaa.Identity()
heatmaps_arr = np.linspace(0.0, 1.0, 2*2, dtype="float32")\
.reshape((2, 2, 1))
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
observed = aug.augment_heatmaps(heatmaps)
assert np.allclose(observed.arr_0to1, heatmaps.arr_0to1)
def test_heatmaps_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
heatmaps_arr = np.linspace(0.0, 1.0, 2*2, dtype="float32")\
.reshape((2, 2, 1))
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
observed = aug_det.augment_heatmaps(heatmaps)
assert np.allclose(observed.arr_0to1, heatmaps.arr_0to1)
def test_segmentation_maps(self):
aug = iaa.Identity()
segmaps_arr = np.arange(2*2).reshape((2, 2, 1)).astype(np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(2, 2, 3))
observed = aug.augment_segmentation_maps(segmaps)
assert np.array_equal(observed.arr, segmaps.arr)
def test_segmentation_maps_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
segmaps_arr = np.arange(2*2).reshape((2, 2, 1)).astype(np.int32)
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(2, 2, 3))
observed = aug_det.augment_segmentation_maps(segmaps)
assert np.array_equal(observed.arr, segmaps.arr)
def test_keypoints(self):
aug = iaa.Identity()
keypoints = create_random_keypoints((16, 70, 50, 3), 4)
observed = aug.augment_keypoints(keypoints)
assert_cbaois_equal(observed, keypoints)
def test_keypoints_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
keypoints = create_random_keypoints((16, 70, 50, 3), 4)
observed = aug_det.augment_keypoints(keypoints)
assert_cbaois_equal(observed, keypoints)
def test_polygons(self):
aug = iaa.Identity()
polygon = ia.Polygon([(10, 10), (30, 10), (30, 50), (10, 50)])
psoi = ia.PolygonsOnImage([polygon], shape=(100, 75, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_polygons_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
polygon = ia.Polygon([(10, 10), (30, 10), (30, 50), (10, 50)])
psoi = ia.PolygonsOnImage([polygon], shape=(100, 75, 3))
observed = aug_det.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_line_strings(self):
aug = iaa.Identity()
ls = LineString([(10, 10), (30, 10), (30, 50), (10, 50)])
lsoi = LineStringsOnImage([ls], shape=(100, 75, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_line_strings_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
ls = LineString([(10, 10), (30, 10), (30, 50), (10, 50)])
lsoi = LineStringsOnImage([ls], shape=(100, 75, 3))
observed = aug_det.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_bounding_boxes(self):
aug = iaa.Identity()
bbs = ia.BoundingBox(x1=10, y1=10, x2=30, y2=50)
bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(100, 75, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_bounding_boxes_deterministic(self):
aug_det = iaa.Identity().to_deterministic()
bbs = ia.BoundingBox(x1=10, y1=10, x2=30, y2=50)
bbsoi = ia.BoundingBoxesOnImage([bbs], shape=(100, 75, 3))
observed = aug_det.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_keypoints_empty(self):
aug = iaa.Identity()
kpsoi = ia.KeypointsOnImage([], shape=(4, 5, 3))
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
def test_polygons_empty(self):
aug = iaa.Identity()
psoi = ia.PolygonsOnImage([], shape=(4, 5, 3))
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_line_strings_empty(self):
aug = iaa.Identity()
lsoi = ia.LineStringsOnImage([], shape=(4, 5, 3))
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_bounding_boxes_empty(self):
aug = iaa.Identity()
bbsoi = ia.BoundingBoxesOnImage([], shape=(4, 5, 3))
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
def test_get_parameters(self):
assert iaa.Identity().get_parameters() == []
def test_other_dtypes_bool(self):
aug = iaa.Identity()
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == image.dtype.type
assert np.all(image_aug == image)
def test_other_dtypes_uint_int(self):
aug = iaa.Identity()
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_float(self):
aug = iaa.Identity()
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_pickleable(self):
aug = iaa.Noop()
runtest_pickleable_uint8_img(aug, iterations=2)
class TestNoop(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
aug = iaa.Noop()
assert isinstance(aug, iaa.Identity)
def test_images(self):
image = np.mod(np.arange(10*10*3), 255)
image = image.astype(np.uint8).reshape((10, 10, 3))
image_aug = iaa.Noop()(image=image)
assert np.array_equal(image, image_aug)
# TODO add tests for line strings
class TestLambda(unittest.TestCase):
def setUp(self):
reseed()
@property
def base_img(self):
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
return base_img
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
return heatmaps
@property
def heatmaps_aug(self):
heatmaps_arr_aug = np.float32([[0.5, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr_aug, shape=(3, 3, 3))
return heatmaps
@property
def segmentation_maps(self):
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
return segmaps
@property
def segmentation_maps_aug(self):
segmaps_arr_aug = np.int32([[1, 1, 2],
[1, 1, 2],
[1, 2, 2]])
segmaps = SegmentationMapsOnImage(segmaps_arr_aug, shape=(3, 3, 3))
return segmaps
@property
def keypoints(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
kpsoi = [ia.KeypointsOnImage(kps, shape=(3, 3, 3))]
return kpsoi
@property
def keypoints_aug(self):
expected_kps = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=1),
ia.Keypoint(x=0, y=2)]
expected = [ia.KeypointsOnImage(expected_kps, shape=(3, 3, 3))]
return expected
@property
def polygons(self):
poly = ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
psois = [ia.PolygonsOnImage([poly], shape=(3, 3, 3))]
return psois
@property
def polygons_aug(self):
expected_poly = ia.Polygon([(1, 2), (3, 2), (3, 4), (1, 4)])
expected_psoi = [ia.PolygonsOnImage([expected_poly], shape=(3, 3, 3))]
return expected_psoi
@property
def lsoi(self):
ls = ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])
lsois = [ia.LineStringsOnImage([ls], shape=(3, 3, 3))]
return lsois
@property
def lsoi_aug(self):
ls = ia.LineString([(1, 2), (3, 2), (3, 4), (1, 4)])
lsois = [ia.LineStringsOnImage([ls], shape=(3, 3, 3))]
return lsois
@property
def bbsoi(self):
bb = ia.BoundingBox(x1=0, y1=1, x2=3, y2=4)
bbsois = [ia.BoundingBoxesOnImage([bb], shape=(3, 3, 3))]
return bbsois
@property
def bbsoi_aug(self):
bb = ia.BoundingBox(x1=0+1, y1=1+2, x2=3+1, y2=4+2)
bbsois = [ia.BoundingBoxesOnImage([bb], shape=(3, 3, 3))]
return bbsois
@classmethod
def func_images(cls, images, random_state, parents, hooks):
if isinstance(images, list):
images = [image + 1 for image in images]
else:
images = images + 1
return images
@classmethod
def func_heatmaps(cls, heatmaps, random_state, parents, hooks):
heatmaps[0].arr_0to1[0, 0] += 0.5
return heatmaps
@classmethod
def func_segmaps(cls, segmaps, random_state, parents, hooks):
segmaps[0].arr += 1
return segmaps
@classmethod
def func_keypoints(cls, keypoints_on_images, random_state, parents, hooks):
for keypoints_on_image in keypoints_on_images:
for kp in keypoints_on_image.keypoints:
kp.x = (kp.x + 1) % 3
return keypoints_on_images
@classmethod
def func_polygons(cls, polygons_on_images, random_state, parents, hooks):
if len(polygons_on_images[0].polygons) == 0:
return [ia.PolygonsOnImage([], shape=polygons_on_images[0].shape)]
new_exterior = np.copy(polygons_on_images[0].polygons[0].exterior)
new_exterior[:, 0] += 1
new_exterior[:, 1] += 2
return [
ia.PolygonsOnImage([ia.Polygon(new_exterior)],
shape=polygons_on_images[0].shape)
]
@classmethod
def func_line_strings(cls, line_strings_on_images, random_state, parents,
hooks):
if line_strings_on_images[0].empty:
return [ia.LineStringsOnImage(
[], shape=line_strings_on_images[0].shape)]
new_coords = np.copy(line_strings_on_images[0].items[0].coords)
new_coords[:, 0] += 1
new_coords[:, 1] += 2
return [
ia.LineStringsOnImage(
[ia.LineString(new_coords)],
shape=line_strings_on_images[0].shape)
]
@classmethod
def func_bbs(cls, bounding_boxes_on_images, random_state, parents, hooks):
if bounding_boxes_on_images[0].empty:
return [
ia.BoundingBoxesOnImage(
[], shape=bounding_boxes_on_images[0].shape)
]
new_coords = np.copy(bounding_boxes_on_images[0].items[0].coords)
new_coords[:, 0] += 1
new_coords[:, 1] += 2
return [
ia.BoundingBoxesOnImage(
[ia.BoundingBox(x1=new_coords[0][0], y1=new_coords[0][1],
x2=new_coords[1][0], y2=new_coords[1][1])],
shape=bounding_boxes_on_images[0].shape)
]
def test_images(self):
image = self.base_img
expected = image + 1
aug = iaa.Lambda(func_images=self.func_images)
for _ in sm.xrange(3):
observed = aug.augment_image(image)
assert np.array_equal(observed, expected)
def test_images_deterministic(self):
image = self.base_img
expected = image + 1
aug_det = iaa.Lambda(func_images=self.func_images).to_deterministic()
for _ in sm.xrange(3):
observed = aug_det.augment_image(image)
assert np.array_equal(observed, expected)
def test_images_list(self):
image = self.base_img
expected = [image + 1]
aug = iaa.Lambda(func_images=self.func_images)
observed = aug.augment_images([image])
assert array_equal_lists(observed, expected)
def test_images_list_deterministic(self):
image = self.base_img
expected = [image + 1]
aug_det = iaa.Lambda(func_images=self.func_images).to_deterministic()
observed = aug_det.augment_images([image])
assert array_equal_lists(observed, expected)
def test_heatmaps(self):
heatmaps = self.heatmaps
heatmaps_arr_aug = self.heatmaps_aug.get_arr()
aug = iaa.Lambda(func_heatmaps=self.func_heatmaps)
for _ in sm.xrange(3):
observed = aug.augment_heatmaps(heatmaps)
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
def test_heatmaps_deterministic(self):
heatmaps = self.heatmaps
heatmaps_arr_aug = self.heatmaps_aug.get_arr()
aug_det = iaa.Lambda(func_heatmaps=self.func_heatmaps)\
.to_deterministic()
for _ in sm.xrange(3):
observed = aug_det.augment_heatmaps(heatmaps)
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), heatmaps_arr_aug)
def test_segmentation_maps(self):
segmaps = self.segmentation_maps
segmaps_arr_aug = self.segmentation_maps_aug.get_arr()
aug = iaa.Lambda(func_segmentation_maps=self.func_segmaps)
for _ in sm.xrange(3):
observed = aug.augment_segmentation_maps(segmaps)
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.get_arr(), segmaps_arr_aug)
def test_segmentation_maps_deterministic(self):
segmaps = self.segmentation_maps
segmaps_arr_aug = self.segmentation_maps_aug.get_arr()
aug_det = iaa.Lambda(func_segmentation_maps=self.func_segmaps)\
.to_deterministic()
for _ in sm.xrange(3):
observed = aug_det.augment_segmentation_maps(segmaps)
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.get_arr(), segmaps_arr_aug)
def test_keypoints(self):
kpsoi = self.keypoints
aug = iaa.Lambda(func_keypoints=self.func_keypoints)
for _ in sm.xrange(3):
observed = aug.augment_keypoints(kpsoi)
expected = self.keypoints_aug
assert_cbaois_equal(observed, expected)
def test_keypoints_deterministic(self):
kpsoi = self.keypoints
aug = iaa.Lambda(func_keypoints=self.func_keypoints)
aug = aug.to_deterministic()
for _ in sm.xrange(3):
observed = aug.augment_keypoints(kpsoi)
expected = self.keypoints_aug
assert_cbaois_equal(observed, expected)
def test_polygons(self):
psois = self.polygons
aug = iaa.Lambda(func_polygons=self.func_polygons)
for _ in sm.xrange(3):
observed = aug.augment_polygons(psois)
expected_psoi = self.polygons_aug
assert_cbaois_equal(observed, expected_psoi)
def test_polygons_deterministic(self):
psois = self.polygons
aug = iaa.Lambda(func_polygons=self.func_polygons)
aug = aug.to_deterministic()
for _ in sm.xrange(3):
observed = aug.augment_polygons(psois)
expected_psoi = self.polygons_aug
assert_cbaois_equal(observed, expected_psoi)
def test_line_strings(self):
lsois = self.lsoi
aug = iaa.Lambda(func_line_strings=self.func_line_strings)
for _ in sm.xrange(3):
observed = aug.augment_line_strings(lsois)
expected_lsoi = self.lsoi_aug
assert_cbaois_equal(observed, expected_lsoi)
def test_line_strings_deterministic(self):
lsois = self.lsoi
aug = iaa.Lambda(func_line_strings=self.func_line_strings)
aug = aug.to_deterministic()
for _ in sm.xrange(3):
observed = aug.augment_line_strings(lsois)
expected_lsoi = self.lsoi_aug
assert_cbaois_equal(observed, expected_lsoi)
def test_bounding_boxes(self):
bbsoi = self.bbsoi
aug = iaa.Lambda(func_bounding_boxes=self.func_bbs)
for _ in sm.xrange(3):
observed = aug.augment_bounding_boxes(bbsoi)
expected = self.bbsoi_aug
assert_cbaois_equal(observed, expected)
def test_bounding_boxes_deterministic(self):
bbsoi = self.bbsoi
aug = iaa.Lambda(func_bounding_boxes=self.func_bbs)
aug = aug.to_deterministic()
for _ in sm.xrange(3):
observed = aug.augment_bounding_boxes(bbsoi)
expected = self.bbsoi_aug
assert_cbaois_equal(observed, expected)
def test_bounding_boxes_x1_x2_coords_can_get_flipped(self):
# Verify that if any augmented BB ends up with x1 > x2, the
# x-coordinates are flipped to ensure that x1 is always below x2.
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)
], shape=(10, 10, 3))
def _func_bbs(bounding_boxes_on_images, random_state, parents, hooks):
bounding_boxes_on_images[0].bounding_boxes[0].x1 += 10
return bounding_boxes_on_images
aug = iaa.Lambda(func_bounding_boxes=_func_bbs)
for _ in sm.xrange(3):
observed = aug.augment_bounding_boxes(bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(2, 1), (0+10, 3)]
)
def test_bounding_boxes_y1_y2_coords_can_get_flipped(self):
# Verify that if any augmented BB ends up with y1 > y2, the
# y-coordinates are flipped to ensure that y1 is always below y2.
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)
], shape=(10, 10, 3))
def _func_bbs(bounding_boxes_on_images, random_state, parents, hooks):
bounding_boxes_on_images[0].bounding_boxes[0].y1 += 10
return bounding_boxes_on_images
aug = iaa.Lambda(func_bounding_boxes=_func_bbs)
for _ in sm.xrange(3):
observed = aug.augment_bounding_boxes(bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(0, 3), (2, 1+10)]
)
def test_keypoints_empty(self):
kpsoi = ia.KeypointsOnImage([], shape=(1, 2, 3))
aug = iaa.Lambda(func_keypoints=self.func_keypoints)
observed = aug.augment_keypoints(kpsoi)
assert_cbaois_equal(observed, kpsoi)
def test_polygons_empty(self):
psoi = ia.PolygonsOnImage([], shape=(1, 2, 3))
aug = iaa.Lambda(func_polygons=self.func_polygons)
observed = aug.augment_polygons(psoi)
assert_cbaois_equal(observed, psoi)
def test_line_strings_empty(self):
lsoi = ia.LineStringsOnImage([], shape=(1, 2, 3))
aug = iaa.Lambda(func_line_strings=self.func_line_strings)
observed = aug.augment_line_strings(lsoi)
assert_cbaois_equal(observed, lsoi)
def test_bounding_boxes_empty(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(1, 2, 3))
aug = iaa.Lambda(func_bounding_boxes=self.func_bbs)
observed = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(observed, bbsoi)
# TODO add tests when funcs are not set in Lambda
def test_other_dtypes_bool(self):
def func_images(images, random_state, parents, hooks):
aug = iaa.Flipud(1.0) # flipud is known to work with all dtypes
return aug.augment_images(images)
aug = iaa.Lambda(func_images=func_images)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
expected = np.zeros((3, 3), dtype=bool)
expected[2, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == expected)
def test_other_dtypes_uint_int(self):
def func_images(images, random_state, parents, hooks):
aug = iaa.Flipud(1.0) # flipud is known to work with all dtypes
return aug.augment_images(images)
aug = iaa.Lambda(func_images=func_images)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = np.zeros((3, 3), dtype=dtype)
expected[2, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, expected)
def test_other_dtypes_float(self):
def func_images(images, random_state, parents, hooks):
aug = iaa.Flipud(1.0) # flipud is known to work with all dtypes
return aug.augment_images(images)
aug = iaa.Lambda(func_images=func_images)
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = np.zeros((3, 3), dtype=dtype)
expected[2, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == expected)
def test_pickleable(self):
aug = iaa.Lambda(
func_images=_lambda_pickleable_callback_images,
seed=1)
runtest_pickleable_uint8_img(aug)
def _lambda_pickleable_callback_images(images, random_state, parents, hooks):
aug = iaa.Flipud(0.5, seed=random_state)
return aug.augment_images(images)
class TestAssertLambda(unittest.TestCase):
DTYPES_UINT = ["uint8", "uint16", "uint32", "uint64"]
DTYPES_INT = ["int8", "int32", "int64"]
DTYPES_FLOAT = ["float16", "float32", "float64", "float128"]
def setUp(self):
reseed()
@property
def image(self):
base_img = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
return np.atleast_3d(base_img)
@property
def images(self):
return np.array([self.image])
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
return SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def psoi(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.PolygonsOnImage(polygons, shape=self.image.shape)
@property
def lsoi(self):
lss = [ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.LineStringsOnImage(lss, shape=self.image.shape)
@property
def bbsoi(self):
bb = ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def aug_succeeds(self):
def _func_images_succeeds(images, random_state, parents, hooks):
return images[0][0, 0] == 0 and images[0][2, 2] == 1
def _func_heatmaps_succeeds(heatmaps, random_state, parents, hooks):
return heatmaps[0].arr_0to1[0, 0] < 0 + 1e-6
def _func_segmaps_succeeds(segmaps, random_state, parents, hooks):
return segmaps[0].arr[0, 0] == 0
def _func_keypoints_succeeds(keypoints_on_images, random_state, parents,
hooks):
return (
keypoints_on_images[0].keypoints[0].x == 0
and keypoints_on_images[0].keypoints[2].x == 2
)
def _func_bounding_boxes_succeeds(bounding_boxes_on_images,
random_state, parents, hooks):
return (bounding_boxes_on_images[0].items[0].x1 == 0
and bounding_boxes_on_images[0].items[0].x2 == 2)
def _func_polygons_succeeds(polygons_on_images, random_state, parents,
hooks):
return (polygons_on_images[0].polygons[0].exterior[0][0] == 0
and polygons_on_images[0].polygons[0].exterior[2][1] == 2)
def _func_line_strings_succeeds(line_strings_on_image, random_state,
parents, hooks):
return (line_strings_on_image[0].items[0].coords[0][0] == 0
and line_strings_on_image[0].items[0].coords[2][1] == 2)
return iaa.AssertLambda(
func_images=_func_images_succeeds,
func_heatmaps=_func_heatmaps_succeeds,
func_segmentation_maps=_func_segmaps_succeeds,
func_keypoints=_func_keypoints_succeeds,
func_bounding_boxes=_func_bounding_boxes_succeeds,
func_polygons=_func_polygons_succeeds,
func_line_strings=_func_line_strings_succeeds)
@property
def aug_fails(self):
def _func_images_fails(images, random_state, parents, hooks):
return images[0][0, 0] == 1
def _func_heatmaps_fails(heatmaps, random_state, parents, hooks):
return heatmaps[0].arr_0to1[0, 0] > 0 + 1e-6
def _func_segmaps_fails(segmaps, random_state, parents, hooks):
return segmaps[0].arr[0, 0] == 1
def _func_keypoints_fails(keypoints_on_images, random_state, parents,
hooks):
return keypoints_on_images[0].keypoints[0].x == 2
def _func_bounding_boxes_fails(bounding_boxes_on_images, random_state,
parents, hooks):
return bounding_boxes_on_images[0].items[0].x1 == 2
def _func_polygons_fails(polygons_on_images, random_state, parents,
hooks):
return polygons_on_images[0].polygons[0].exterior[0][0] == 2
def _func_line_strings_fails(line_strings_on_images, random_state,
parents, hooks):
return line_strings_on_images[0].items[0].coords[0][0] == 2
return iaa.AssertLambda(
func_images=_func_images_fails,
func_heatmaps=_func_heatmaps_fails,
func_segmentation_maps=_func_segmaps_fails,
func_keypoints=_func_keypoints_fails,
func_bounding_boxes=_func_bounding_boxes_fails,
func_polygons=_func_polygons_fails,
func_line_strings=_func_line_strings_fails)
def test_images_as_array_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_as_array_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_images(self.images)
def test_images_as_array_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_as_array_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_images(self.images)
def test_images_as_list_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_images([self.images[0]])
expected = [self.images[0]]
assert array_equal_lists(observed, expected)
def test_images_as_list_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_images([self.images[0]])
def test_images_as_list_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_images([self.images[0]])
expected = [self.images[0]]
assert array_equal_lists(observed, expected)
def test_images_as_list_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_images([self.images[0]])
def test_heatmaps_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_heatmaps(self.heatmaps)
def test_heatmaps_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 3, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_heatmaps(self.heatmaps)
def test_segmaps_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_segmentation_maps(self.segmaps)
def test_segmaps_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 3, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_segmentation_maps(self.segmaps)
def test_keypoints_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_keypoints(self.kpsoi)
def test_keypoints_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_keypoints(self.kpsoi)
def test_polygons_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_polygons(self.psoi)
def test_polygons_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_polygons(self.psoi)
def test_line_strings_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_line_strings(self.lsoi)
def test_line_strings_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_assert_that_fails__deterministic(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_line_strings(self.lsoi)
def test_bounding_boxes_with_assert_that_succeeds(self):
observed = self.aug_succeeds.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_bounding_boxes_with_assert_that_fails(self):
with self.assertRaises(AssertionError):
_ = self.aug_fails.augment_bounding_boxes(self.bbsoi)
def test_bounding_boxes_with_assert_that_succeeds__deterministic(self):
aug_succeeds_det = self.aug_succeeds.to_deterministic()
observed = aug_succeeds_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
    def test_bounding_boxes_with_assert_that_fails__deterministic(self):
        aug_fails_det = self.aug_fails.to_deterministic()
        with self.assertRaises(AssertionError):
            _ = aug_fails_det.augment_bounding_boxes(self.bbsoi)
def test_other_dtypes_bool__with_assert_that_succeeds(self):
def func_images_succeeds(images, random_state, parents, hooks):
return np.allclose(images[0][0, 0], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_succeeds)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug == image)
def test_other_dtypes_uint_int__with_assert_that_succeeds(self):
def func_images_succeeds(images, random_state, parents, hooks):
return np.allclose(images[0][0, 0], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_succeeds)
dtypes = self.DTYPES_UINT + self.DTYPES_INT
for dtype in dtypes:
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = 1
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_float__with_assert_that_succeeds(self):
def func_images_succeeds(images, random_state, parents, hooks):
return np.allclose(images[0][0, 0], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_succeeds)
dtypes = self.DTYPES_FLOAT
for dtype in dtypes:
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = 1
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_bool__with_assert_that_fails(self):
def func_images_fails(images, random_state, parents, hooks):
return np.allclose(images[0][0, 1], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_fails)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_other_dtypes_uint_int__with_assert_that_fails(self):
def func_images_fails(images, random_state, parents, hooks):
return np.allclose(images[0][0, 1], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_fails)
dtypes = self.DTYPES_UINT + self.DTYPES_INT
for dtype in dtypes:
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = 1
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_other_dtypes_float__with_assert_that_fails(self):
def func_images_fails(images, random_state, parents, hooks):
return np.allclose(images[0][0, 1], 1, rtol=0, atol=1e-6)
aug = iaa.AssertLambda(func_images=func_images_fails)
dtypes = self.DTYPES_FLOAT
for dtype in dtypes:
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = 1
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_pickleable(self):
aug = iaa.AssertLambda(
func_images=_assertlambda_pickleable_callback_images,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2)
# in py3+, this could be a classmethod of TestAssertLambda,
# but in py2.7 such classmethods are not pickle-able and would cause an error
def _assertlambda_pickleable_callback_images(images, random_state,
parents, hooks):
return np.any(images[0] > 0)
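# Tests for iaa.AssertShape. Judging from the cases below, the expected shape
# is an (N, H, W, C) tuple whose entries may each be an exact integer, None
# (any value allowed), a list of allowed values, or a (min, max) tuple that
# the failing height-4 cases suggest is treated as a half-open range
# [min, max).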
class TestAssertShape(unittest.TestCase):
DTYPES_UINT = ["uint8", "uint16", "uint32", "uint64"]
DTYPES_INT = ["int8", "int32", "int64"]
DTYPES_FLOAT = ["float16", "float32", "float64", "float128"]
def setUp(self):
reseed()
@property
def image(self):
base_img = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0]], dtype=np.uint8)
return np.atleast_3d(base_img)
@property
def images(self):
return np.array([self.image])
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 4, 3))
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0]])
return SegmentationMapsOnImage(segmaps_arr, shape=(3, 4, 3))
@property
def kpsoi(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def psoi(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.PolygonsOnImage(polygons, shape=self.image.shape)
@property
def lsoi(self):
lss = [ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.LineStringsOnImage(lss, shape=self.image.shape)
@property
def bbsoi(self):
bb = ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def image_h4(self):
base_img_h4 = np.array([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0]], dtype=np.uint8)
return np.atleast_3d(base_img_h4)
@property
def images_h4(self):
return np.array([self.image_h4])
@property
def heatmaps_h4(self):
heatmaps_arr_h4 = np.float32([[0.0, 0.0, 1.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0]])
return ia.HeatmapsOnImage(heatmaps_arr_h4, shape=(4, 4, 3))
@property
def segmaps_h4(self):
segmaps_arr_h4 = np.int32([[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0]])
return SegmentationMapsOnImage(segmaps_arr_h4, shape=(4, 4, 3))
@property
def kpsoi_h4(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)]
return ia.KeypointsOnImage(kps, shape=self.image_h4.shape)
@property
def psoi_h4(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.PolygonsOnImage(polygons, shape=self.image_h4.shape)
@property
def lsoi_h4(self):
lss = [ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])]
return ia.LineStringsOnImage(lss, shape=self.image_h4.shape)
@property
def bbsoi_h4(self):
bb = ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)
return ia.BoundingBoxesOnImage([bb], shape=self.image_h4.shape)
@property
def aug_exact_shape(self):
return iaa.AssertShape((1, 3, 4, 1))
@property
def aug_none_in_shape(self):
return iaa.AssertShape((None, 3, 4, 1))
@property
def aug_list_in_shape(self):
return iaa.AssertShape((1, [1, 3, 5], 4, 1))
@property
def aug_tuple_in_shape(self):
return iaa.AssertShape((1, (1, 4), 4, 1))
def test_images_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_with_exact_shape__succeeds__list(self):
aug = self.aug_exact_shape
observed = aug.augment_images([self.images[0]])
expected = [self.images[0]]
assert array_equal_lists(observed, expected)
def test_images_with_exact_shape__succeeds__deterministic__list(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_images([self.images[0]])
expected = [self.images[0]]
assert array_equal_lists(observed, expected)
def test_heatmaps_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_segmaps_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_keypoints_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_polygons_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_line_strings_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_bounding_boxes_with_exact_shape__succeeds(self):
aug = self.aug_exact_shape
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_bounding_boxes_with_exact_shape__succeeds__deterministic(self):
aug_det = self.aug_exact_shape.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_images_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_images(self.images_h4)
def test_heatmaps_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_heatmaps(self.heatmaps_h4)
def test_keypoints_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_keypoints(self.kpsoi_h4)
def test_polygons_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_polygons(self.psoi_h4)
def test_line_strings_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_line_strings(self.lsoi_h4)
def test_bounding_boxes_with_exact_shape__fails(self):
aug = self.aug_exact_shape
with self.assertRaises(AssertionError):
_ = aug.augment_bounding_boxes(self.bbsoi_h4)
def test_images_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_heatmaps_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_segmaps_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
        assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_keypoints_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_polygons_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_line_strings_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_bounding_boxes_with_none_in_shape__succeeds(self):
aug = self.aug_none_in_shape
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_bounding_boxes_with_none_in_shape__succeeds__deterministic(self):
aug_det = self.aug_none_in_shape.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_images_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_images(self.images_h4)
def test_heatmaps_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_heatmaps(self.heatmaps_h4)
def test_keypoints_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_keypoints(self.kpsoi_h4)
def test_polygons_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_polygons(self.psoi_h4)
def test_line_strings_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_line_strings(self.lsoi_h4)
def test_bounding_boxes_with_none_in_shape__fails(self):
aug = self.aug_none_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_bounding_boxes(self.bbsoi_h4)
def test_images_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_heatmaps_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_segmaps_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_keypoints_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_polygons_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_line_strings_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_bounding_boxes_with_list_in_shape__succeeds(self):
aug = self.aug_list_in_shape
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_bounding_boxes_with_list_in_shape__succeeds__deterministic(self):
aug_det = self.aug_list_in_shape.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_images_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_images(self.images_h4)
def test_heatmaps_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_heatmaps(self.heatmaps_h4)
def test_segmaps_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_segmentation_maps(self.segmaps_h4)
def test_keypoints_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_keypoints(self.kpsoi_h4)
def test_polygons_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_polygons(self.psoi_h4)
def test_line_strings_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_line_strings(self.lsoi_h4)
def test_bounding_boxes_with_list_in_shape__fails(self):
aug = self.aug_list_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_bounding_boxes(self.bbsoi_h4)
def test_images_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_images_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_images(self.images)
expected = self.images
assert np.array_equal(observed, expected)
def test_heatmaps_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_heatmaps_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_heatmaps(self.heatmaps)
assert observed.shape == (3, 4, 3)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.allclose(observed.get_arr(), self.heatmaps.get_arr())
def test_segmaps_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
        assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_segmaps_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_segmentation_maps(self.segmaps)
assert observed.shape == (3, 4, 3)
        assert np.array_equal(observed.get_arr(), self.segmaps.get_arr())
def test_keypoints_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_keypoints_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_keypoints(self.kpsoi)
assert_cbaois_equal(observed, self.kpsoi)
def test_polygons_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_polygons_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_polygons(self.psoi)
assert_cbaois_equal(observed, self.psoi)
def test_line_strings_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_line_strings_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi)
def test_bounding_boxes_with_tuple_in_shape__succeeds(self):
aug = self.aug_tuple_in_shape
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_bounding_boxes_with_tuple_in_shape__succeeds__deterministic(self):
aug_det = self.aug_tuple_in_shape.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi)
def test_images_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_images(self.images_h4)
def test_heatmaps_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_heatmaps(self.heatmaps_h4)
def test_segmaps_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_segmentation_maps(self.segmaps_h4)
def test_keypoints_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_keypoints(self.kpsoi_h4)
def test_polygons_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_polygons(self.psoi_h4)
def test_line_strings_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_line_strings(self.lsoi_h4)
def test_bounding_boxes_with_tuple_in_shape__fails(self):
aug = self.aug_tuple_in_shape
with self.assertRaises(AssertionError):
_ = aug.augment_bounding_boxes(self.bbsoi_h4)
def test_fails_if_shape_contains_invalid_datatype(self):
got_exception = False
try:
aug = iaa.AssertShape((1, False, 4, 1))
_ = aug.augment_images(np.zeros((1, 2, 2, 1), dtype=np.uint8))
except Exception as exc:
assert "Invalid datatype " in str(exc)
got_exception = True
assert got_exception
def test_other_dtypes_bool__succeeds(self):
aug = iaa.AssertShape((None, 3, 3, 1))
image = np.zeros((3, 3, 1), dtype=bool)
image[0, 0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == image.dtype.type
assert np.all(image_aug == image)
def test_other_dtypes_uint_int__succeeds(self):
aug = iaa.AssertShape((None, 3, 3, 1))
for dtype in self.DTYPES_UINT + self.DTYPES_INT:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3, 1), dtype=dtype)
image[0, 0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_float__succeeds(self):
aug = iaa.AssertShape((None, 3, 3, 1))
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(self.DTYPES_FLOAT, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3, 1), dtype=dtype)
                image[0, 0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_bool__fails(self):
aug = iaa.AssertShape((None, 3, 4, 1))
image = np.zeros((3, 3, 1), dtype=bool)
image[0, 0, 0] = True
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_other_dtypes_uint_int__fails(self):
aug = iaa.AssertShape((None, 3, 4, 1))
for dtype in self.DTYPES_UINT + self.DTYPES_INT:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3, 1), dtype=dtype)
image[0, 0, 0] = value
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_other_dtypes_float__fails(self):
aug = iaa.AssertShape((None, 3, 4, 1))
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(self.DTYPES_FLOAT, values):
image = np.zeros((3, 3, 1), dtype=dtype)
image[0, 0, 0] = value
with self.assertRaises(AssertionError):
_ = aug.augment_image(image)
def test_pickleable(self):
aug = iaa.AssertShape(
shape=(None, 15, 15, None), check_images=True,
seed=1)
runtest_pickleable_uint8_img(aug, iterations=2, shape=(15, 15, 1))
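# The following module-level tests cover the deprecated clipping helpers
# iaa.clip_augmented_image(s) and their in-place "_" variants. They only
# verify that pixel values get clipped to [min_value, max_value] and that a
# deprecation warning is emitted.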
def test_clip_augmented_image_():
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
image = np.zeros((1, 3), dtype=np.uint8)
image[0, 0] = 10
image[0, 1] = 20
image[0, 2] = 30
image_clipped = iaa.clip_augmented_image_(image,
min_value=15, max_value=25)
assert image_clipped[0, 0] == 15
assert image_clipped[0, 1] == 20
assert image_clipped[0, 2] == 25
assert len(caught_warnings) >= 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_clip_augmented_image():
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
image = np.zeros((1, 3), dtype=np.uint8)
image[0, 0] = 10
image[0, 1] = 20
image[0, 2] = 30
image_clipped = iaa.clip_augmented_image(image,
min_value=15, max_value=25)
assert image_clipped[0, 0] == 15
assert image_clipped[0, 1] == 20
assert image_clipped[0, 2] == 25
assert len(caught_warnings) >= 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_clip_augmented_images_():
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
images = np.zeros((2, 1, 3), dtype=np.uint8)
images[:, 0, 0] = 10
images[:, 0, 1] = 20
images[:, 0, 2] = 30
imgs_clipped = iaa.clip_augmented_images_(images,
min_value=15, max_value=25)
assert np.all(imgs_clipped[:, 0, 0] == 15)
assert np.all(imgs_clipped[:, 0, 1] == 20)
assert np.all(imgs_clipped[:, 0, 2] == 25)
images = [np.zeros((1, 3), dtype=np.uint8) for _ in sm.xrange(2)]
for i in sm.xrange(len(images)):
images[i][0, 0] = 10
images[i][0, 1] = 20
images[i][0, 2] = 30
imgs_clipped = iaa.clip_augmented_images_(images,
min_value=15, max_value=25)
assert isinstance(imgs_clipped, list)
assert np.all([imgs_clipped[i][0, 0] == 15
for i in sm.xrange(len(images))])
assert np.all([imgs_clipped[i][0, 1] == 20
for i in sm.xrange(len(images))])
assert np.all([imgs_clipped[i][0, 2] == 25
for i in sm.xrange(len(images))])
assert len(caught_warnings) >= 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_clip_augmented_images():
warnings.resetwarnings()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
images = np.zeros((2, 1, 3), dtype=np.uint8)
images[:, 0, 0] = 10
images[:, 0, 1] = 20
images[:, 0, 2] = 30
imgs_clipped = iaa.clip_augmented_images(images,
min_value=15, max_value=25)
assert np.all(imgs_clipped[:, 0, 0] == 15)
assert np.all(imgs_clipped[:, 0, 1] == 20)
assert np.all(imgs_clipped[:, 0, 2] == 25)
images = [np.zeros((1, 3), dtype=np.uint8) for _ in sm.xrange(2)]
for i in sm.xrange(len(images)):
images[i][0, 0] = 10
images[i][0, 1] = 20
images[i][0, 2] = 30
imgs_clipped = iaa.clip_augmented_images(images,
min_value=15, max_value=25)
assert isinstance(imgs_clipped, list)
assert np.all([imgs_clipped[i][0, 0] == 15
for i in sm.xrange(len(images))])
assert np.all([imgs_clipped[i][0, 1] == 20
for i in sm.xrange(len(images))])
        assert np.all([imgs_clipped[i][0, 2] == 25
                       for i in sm.xrange(len(images))])
assert len(caught_warnings) >= 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_reduce_to_nonempty():
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1), ia.Keypoint(x=1, y=0)],
shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == [kpsois[0], kpsois[1], kpsois[3]]
assert ids == [0, 1, 3]
kpsois = [
ia.KeypointsOnImage([], shape=(4, 4, 3)),
ia.KeypointsOnImage([], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == []
assert ids == []
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3))
]
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == [kpsois[0]]
assert ids == [0]
kpsois = []
kpsois_reduced, ids = iaa.reduce_to_nonempty(kpsois)
assert kpsois_reduced == []
assert ids == []
def test_invert_reduce_to_nonempty():
kpsois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1),
ia.Keypoint(x=1, y=0)], shape=(4, 4, 3)),
ia.KeypointsOnImage([ia.Keypoint(x=2, y=2)], shape=(4, 4, 3)),
]
kpsois_recovered = iaa.invert_reduce_to_nonempty(
kpsois, [0, 1, 2], ["foo1", "foo2", "foo3"])
assert kpsois_recovered == ["foo1", "foo2", "foo3"]
kpsois_recovered = iaa.invert_reduce_to_nonempty(kpsois, [1], ["foo1"])
assert np.all([
isinstance(kpsoi, ia.KeypointsOnImage)
for kpsoi
in kpsois]) # assert original list not changed
assert kpsois_recovered == [kpsois[0], "foo1", kpsois[2]]
kpsois_recovered = iaa.invert_reduce_to_nonempty(kpsois, [], [])
assert kpsois_recovered == [kpsois[0], kpsois[1], kpsois[2]]
kpsois_recovered = iaa.invert_reduce_to_nonempty([], [], [])
assert kpsois_recovered == []
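# Minimal Augmenter subclasses used by the generic Augmenter tests below.
# They implement only the parts of the interface required here (identity
# image augmentation and get_parameters()); _DummyAugmenterBBs additionally
# shifts bounding boxes by one pixel along the x-axis.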
class _DummyAugmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def get_parameters(self):
return []
class _DummyAugmenterBBs(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images, random_state,
parents, hooks):
return [bbsoi.shift(x=1)
for bbsoi
in bounding_boxes_on_images]
def get_parameters(self):
return []
# TODO remove _augment_heatmaps() and _augment_keypoints() here once they are
# no longer abstract methods but default to noop
class _DummyAugmenterCallsParent(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return super(_DummyAugmenterCallsParent, self)\
._augment_images(images, random_state, parents, hooks)
def get_parameters(self):
return super(_DummyAugmenterCallsParent, self)\
.get_parameters()
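# small helper: True if both RNGs are in the same state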
def _same_rs(rs1, rs2):
return rs1.equals(rs2)
# TODO the tests in here do not check everything, but instead only the cases
# that were not yet indirectly tested via other tests
class TestAugmenter(unittest.TestCase):
def setUp(self):
reseed()
def test___init___global_rng(self):
aug = _DummyAugmenter()
assert not aug.deterministic
assert aug.random_state.is_global_rng()
def test___init___deterministic(self):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
aug = _DummyAugmenter(deterministic=True)
assert aug.deterministic
assert not aug.random_state.is_global_rng()
assert len(caught_warnings) == 1
assert (
"is deprecated"
in str(caught_warnings[-1].message))
# old name for parameter `seed`
def test___init___random_state_is_rng(self):
rs = iarandom.RNG(123)
aug = _DummyAugmenter(seed=rs)
assert aug.random_state.generator is rs.generator
# old name for parameter `seed`
def test___init___random_state_is_seed(self):
aug = _DummyAugmenter(seed=123)
assert aug.random_state.equals(iarandom.RNG(123))
def test___init___seed_is_random_state(self):
rs = iarandom.RNG(123)
aug = _DummyAugmenter(seed=rs)
assert aug.random_state.generator is rs.generator
def test___init___seed_is_seed(self):
aug = _DummyAugmenter(seed=123)
assert aug.random_state.equals(iarandom.RNG(123))
def test_augment_images_called_probably_with_single_image(self):
aug = _DummyAugmenter()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = aug.augment_images(np.zeros((16, 32, 3), dtype=np.uint8))
assert len(caught_warnings) == 1
assert (
"indicates that you provided a single image with shape (H, W, C)"
in str(caught_warnings[-1].message)
)
def test_augment_images_array_in_list_out(self):
self._test_augment_images_array_in_list_out_varying_channels(
[3] * 20)
def test_augment_images_array_in_list_out_single_channel(self):
self._test_augment_images_array_in_list_out_varying_channels(
[1] * 20)
def test_augment_images_array_in_list_out_no_channels(self):
self._test_augment_images_array_in_list_out_varying_channels(
[None] * 20)
def test_augment_images_array_in_list_out_varying_channels(self):
self._test_augment_images_array_in_list_out_varying_channels(
["random"] * 20)
@classmethod
def _test_augment_images_array_in_list_out_varying_channels(cls,
nb_channels):
assert len(nb_channels) == 20
aug = iaa.Crop(((1, 8), (1, 8), (1, 8), (1, 8)), keep_size=False)
seen = [0, 0]
for nb_channels_i in nb_channels:
if nb_channels_i == "random":
channels = np.random.choice([None, 1, 3, 4, 9], size=(16,))
elif nb_channels_i is None:
channels = np.random.choice([None], size=(16,))
else:
channels = np.random.choice([nb_channels_i], size=(16,))
images = [np.zeros((64, 64), dtype=np.uint8)
if c is None
else np.zeros((64, 64, c), dtype=np.uint8)
for c in channels]
if nb_channels_i != "random":
images = np.array(images)
observed = aug.augment_images(images)
if ia.is_np_array(observed):
seen[0] += 1
else:
seen[1] += 1
for image, c in zip(observed, channels):
if c is None:
assert image.ndim == 2
else:
assert image.ndim == 3
assert image.shape[2] == c
assert 48 <= image.shape[0] <= 62
assert 48 <= image.shape[1] <= 62
assert seen[0] <= 3
assert seen[1] >= 17
def test_augment_images_with_2d_inputs(self):
base_img1 = np.array([[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 1, 1, 1]], dtype=np.uint8)
base_img2 = np.array([[0, 0, 1, 1],
[0, 1, 1, 1],
[0, 1, 0, 0]], dtype=np.uint8)
base_img1_flipped = np.array([[1, 1, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0]], dtype=np.uint8)
base_img2_flipped = np.array([[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 0]], dtype=np.uint8)
images = np.array([base_img1, base_img2])
images_flipped = np.array([base_img1_flipped, base_img2_flipped])
images_list = [base_img1, base_img2]
images_flipped_list = [base_img1_flipped, base_img2_flipped]
images_list2d3d = [base_img1, base_img2[:, :, np.newaxis]]
images_flipped_list2d3d = [
base_img1_flipped,
base_img2_flipped[:, :, np.newaxis]]
aug = iaa.Fliplr(1.0)
noaug = iaa.Fliplr(0.0)
# one numpy array as input
observed = aug.augment_images(images)
assert np.array_equal(observed, images_flipped)
observed = noaug.augment_images(images)
assert np.array_equal(observed, images)
# list of 2d images
observed = aug.augment_images(images_list)
assert array_equal_lists(observed, images_flipped_list)
observed = noaug.augment_images(images_list)
assert array_equal_lists(observed, images_list)
# list of images, one 2d and one 3d
observed = aug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_flipped_list2d3d)
observed = noaug.augment_images(images_list2d3d)
assert array_equal_lists(observed, images_list2d3d)
def test_augment_keypoints_single_instance(self):
kpsoi = ia.KeypointsOnImage([ia.Keypoint(10, 10)], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints(kpsoi)
assert len(kpsoi_aug.keypoints) == 1
assert kpsoi_aug.keypoints[0].x == 11
def test_augment_keypoints_single_instance_rot90(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=5),
ia.Keypoint(x=3, y=3)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
kpsoi_aug = aug.augment_keypoints(kpsoi)
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
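        # expected mapping of Rot90(1, keep_size=False) on a (H=5, W=10)
        # image, as relied on by the asserts below: (x, y) -> (H - y, x)
        # (plus kp_offset on the x-axis)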
assert np.allclose(kpsoi_aug.keypoints[0].x, 5 - 2 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[0].y, 1)
assert np.allclose(kpsoi_aug.keypoints[1].x, 5 - 5 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[1].y, 2)
assert np.allclose(kpsoi_aug.keypoints[2].x, 5 - 3 + kp_offset)
assert np.allclose(kpsoi_aug.keypoints[2].y, 3)
def test_augment_keypoints_many_instances_rot90(self):
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=2, y=5),
ia.Keypoint(x=3, y=3)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
kpsoi_aug = aug.augment_keypoints([kpsoi, kpsoi, kpsoi])
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
for i in range(3):
assert np.allclose(kpsoi_aug[i].keypoints[0].x, 5 - 2 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[0].y, 1)
assert np.allclose(kpsoi_aug[i].keypoints[1].x, 5 - 5 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[1].y, 2)
assert np.allclose(kpsoi_aug[i].keypoints[2].x, 5 - 3 + kp_offset)
assert np.allclose(kpsoi_aug[i].keypoints[2].y, 3)
def test_augment_keypoints_empty_instance(self):
# test empty KeypointsOnImage objects
kpsoi = ia.KeypointsOnImage([], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints([kpsoi])
assert len(kpsoi_aug) == 1
assert len(kpsoi_aug[0].keypoints) == 0
def test_augment_keypoints_mixed_filled_and_empty_instances(self):
kpsoi1 = ia.KeypointsOnImage([], shape=(32, 32, 3))
kpsoi2 = ia.KeypointsOnImage([ia.Keypoint(10, 10)], shape=(32, 32, 3))
aug = iaa.Affine(translate_px={"x": 1})
kpsoi_aug = aug.augment_keypoints([kpsoi1, kpsoi2])
assert len(kpsoi_aug) == 2
assert len(kpsoi_aug[0].keypoints) == 0
assert len(kpsoi_aug[1].keypoints) == 1
assert kpsoi_aug[1].keypoints[0].x == 11
def test_augment_keypoints_aligned_despite_empty_instance(self):
# Test if augmenting lists of KeypointsOnImage is still aligned with
# image augmentation when one KeypointsOnImage instance is empty
# (no keypoints)
kpsoi_lst = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([], shape=(1, 8)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=1, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10)),
ia.KeypointsOnImage([ia.Keypoint(x=0, y=0)], shape=(1, 10))
]
image = np.zeros((1, 10), dtype=np.uint8)
image[0, 0] = 255
images = np.tile(image[np.newaxis, :, :], (len(kpsoi_lst), 1, 1))
aug = iaa.Affine(translate_px={"x": (0, 8)}, order=0, mode="constant",
cval=0)
for i in sm.xrange(10):
for is_list in [False, True]:
with self.subTest(i=i, is_list=is_list):
aug_det = aug.to_deterministic()
if is_list:
images_aug = aug_det.augment_images(list(images))
else:
images_aug = aug_det.augment_images(images)
kpsoi_lst_aug = aug_det.augment_keypoints(kpsoi_lst)
if is_list:
images_aug = np.array(images_aug, dtype=np.uint8)
translations_imgs = np.argmax(images_aug[:, 0, :], axis=1)
translations_kps = [
kpsoi.keypoints[0].x
if len(kpsoi.keypoints) > 0
else None
for kpsoi
in kpsoi_lst_aug]
assert len([kpresult
for kpresult
in translations_kps
if kpresult is None]) == 1
assert translations_kps[5] is None
translations_imgs = np.concatenate(
[translations_imgs[0:5], translations_imgs[6:]])
translations_kps = np.array(
translations_kps[0:5] + translations_kps[6:],
dtype=translations_imgs.dtype)
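                    # kpsoi_lst entries 2 and 8 start at x=1 instead of x=0;
                    # after dropping the empty instance at index 5, the second
                    # of these sits at index 7 (hence 8-1), so subtract that
                    # initial offset before comparing with the image shifts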
translations_kps[2] -= 1
translations_kps[8-1] -= 1
assert np.array_equal(translations_imgs, translations_kps)
def test_augment_keypoints_aligned_despite_nongeometric_image_ops(self):
# Verify for keypoints that adding augmentations that only
# affect images doesn't lead to misalignments between image
# and keypoint transformations
augs = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.AdditiveGaussianNoise(scale=(0.01, 0.1)),
iaa.Affine(translate_px={"x": (-10, 10), "y": (-10, 10)},
order=0, mode="constant", cval=0),
iaa.AddElementwise((0, 1)),
iaa.Flipud(0.5)
], random_order=True)
kps = [ia.Keypoint(x=15.5, y=12.5), ia.Keypoint(x=23.5, y=20.5),
ia.Keypoint(x=61.5, y=36.5), ia.Keypoint(x=47.5, y=32.5)]
kpsoi = ia.KeypointsOnImage(kps, shape=(50, 80, 4))
image = kpsoi.to_keypoint_image(size=1)
images = np.tile(image[np.newaxis, ...], (20, 1, 1, 1))
for _ in sm.xrange(50):
images_aug, kpsois_aug = augs(images=images,
keypoints=[kpsoi]*len(images))
for image_aug, kpsoi_aug in zip(images_aug, kpsois_aug):
kpsoi_recovered = ia.KeypointsOnImage.from_keypoint_image(
image_aug, nb_channels=4, threshold=100
)
for kp, kp_image in zip(kpsoi_aug.keypoints,
kpsoi_recovered.keypoints):
distance = np.sqrt((kp.x - kp_image.x)**2
+ (kp.y - kp_image.y)**2)
assert distance <= 1
def test_augment_bounding_boxes(self):
aug = _DummyAugmenterBBs()
bb = ia.BoundingBox(x1=1, y1=4, x2=2, y2=5)
bbs = [bb]
bbsois = [ia.BoundingBoxesOnImage(bbs, shape=(10, 10, 3))]
bbsois_aug = aug.augment_bounding_boxes(bbsois)
bb_aug = bbsois_aug[0].bounding_boxes[0]
assert bb_aug.x1 == 1+1
assert bb_aug.y1 == 4
assert bb_aug.x2 == 2+1
assert bb_aug.y2 == 5
def test_augment_bounding_boxes_empty_bboi(self):
aug = _DummyAugmenterBBs()
bbsois = [ia.BoundingBoxesOnImage([], shape=(10, 10, 3))]
bbsois_aug = aug.augment_bounding_boxes(bbsois)
assert len(bbsois_aug) == 1
assert bbsois_aug[0].bounding_boxes == []
def test_augment_bounding_boxes_empty_list(self):
aug = _DummyAugmenterBBs()
bbsois_aug = aug.augment_bounding_boxes([])
assert bbsois_aug == []
def test_augment_bounding_boxes_single_instance(self):
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, x2=3, y1=4, y2=5),
ia.BoundingBox(x1=2.5, x2=3, y1=0, y2=2)
], shape=(5, 10, 3))
aug = iaa.Identity()
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
for bb_aug, bb in zip(bbsoi_aug.bounding_boxes, bbsoi.bounding_boxes):
assert np.allclose(bb_aug.x1, bb.x1)
assert np.allclose(bb_aug.x2, bb.x2)
assert np.allclose(bb_aug.y1, bb.y1)
assert np.allclose(bb_aug.y2, bb.y2)
def test_augment_bounding_boxes_single_instance_rot90(self):
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, x2=3, y1=4, y2=5),
ia.BoundingBox(x1=2.5, x2=3, y1=0, y2=2)
], shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
        # Note here that the new coordinates are minima/maxima of the BB, so
        # they are not as straightforward to compute as for keypoint
        # augmentation
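        # e.g. with the keypoint mapping (x, y) -> (5 - y + kp_offset, x),
        # the corners (1, 4) and (3, 5) of the first BB map to (1, 1) and
        # (0, 3), giving x1=0, x2=1, y1=1, y2=3 as asserted below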
bb0 = bbsoi_aug.bounding_boxes[0]
bb1 = bbsoi_aug.bounding_boxes[1]
assert np.allclose(bb0.x1, 5 - 5 + kp_offset)
assert np.allclose(bb0.x2, 5 - 4 + kp_offset)
assert np.allclose(bb0.y1, 1)
assert np.allclose(bb0.y2, 3)
assert np.allclose(bb1.x1, 5 - 2 + kp_offset)
assert np.allclose(bb1.x2, 5 - 0 + kp_offset)
assert np.allclose(bb1.y1, 2.5)
assert np.allclose(bb1.y2, 3)
def test_augment_bounding_box_list_of_many_instances(self):
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, x2=3, y1=4, y2=5),
ia.BoundingBox(x1=2.5, x2=3, y1=0, y2=2)
], shape=(5, 10, 3))
aug = iaa.Rot90(1, keep_size=False)
bbsoi_aug = aug.augment_bounding_boxes([bbsoi, bbsoi, bbsoi])
# set offset to -1 if Rot90 uses int-based coordinate transformation
kp_offset = 0
for i in range(3):
bb0 = bbsoi_aug[i].bounding_boxes[0]
bb1 = bbsoi_aug[i].bounding_boxes[1]
assert np.allclose(bb0.x1, 5 - 5 + kp_offset)
assert np.allclose(bb0.x2, 5 - 4 + kp_offset)
assert np.allclose(bb0.y1, 1)
assert np.allclose(bb0.y2, 3)
assert np.allclose(bb1.x1, 5 - 2 + kp_offset)
assert np.allclose(bb1.x2, 5 - 0 + kp_offset)
assert np.allclose(bb1.y1, 2.5)
assert np.allclose(bb1.y2, 3)
def test_augment_heatmaps_noop_single_heatmap(self):
heatmap_arr = np.linspace(0.0, 1.0, num=4*4).reshape((4, 4, 1))
heatmap = ia.HeatmapsOnImage(heatmap_arr.astype(np.float32),
shape=(4, 4, 3))
aug = iaa.Identity()
heatmap_aug = aug.augment_heatmaps(heatmap)
assert np.allclose(heatmap_aug.arr_0to1, heatmap.arr_0to1)
def test_augment_heatmaps_rot90_single_heatmap(self):
heatmap_arr = np.linspace(0.0, 1.0, num=4*4).reshape((4, 4, 1))
heatmap = ia.HeatmapsOnImage(heatmap_arr.astype(np.float32),
shape=(4, 4, 3))
aug = iaa.Rot90(1, keep_size=False)
heatmap_aug = aug.augment_heatmaps(heatmap)
assert np.allclose(heatmap_aug.arr_0to1, np.rot90(heatmap.arr_0to1, -1))
def test_augment_heatmaps_rot90_list_of_many_heatmaps(self):
heatmap_arr = np.linspace(0.0, 1.0, num=4*4).reshape((4, 4, 1))
heatmap = ia.HeatmapsOnImage(heatmap_arr.astype(np.float32),
shape=(4, 4, 3))
aug = iaa.Rot90(1, keep_size=False)
heatmaps_aug = aug.augment_heatmaps([heatmap] * 3)
for hm in heatmaps_aug:
assert np.allclose(hm.arr_0to1, np.rot90(heatmap.arr_0to1, -1))
def test_legacy_fallback_to_kp_aug_for_cbaois(self):
class _LegacyAugmenter(iaa.Augmenter):
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
return [kpsoi.shift(x=1) for kpsoi in keypoints_on_images]
def get_parameters(self):
return []
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=1, y1=2, x2=3, y2=4)
], shape=(4, 5, 3))
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (1, 0), (1, 1)])
], shape=(4, 5, 3))
lsoi = ia.LineStringsOnImage([
ia.LineString([(0, 0), (1, 0), (1, 1)])
], shape=(4, 5, 3))
aug = _LegacyAugmenter()
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
psoi_aug = aug.augment_polygons(psoi)
lsoi_aug = aug.augment_line_strings(lsoi)
assert bbsoi_aug[0].coords_almost_equals(bbsoi[0].shift(x=1))
assert psoi_aug[0].coords_almost_equals(psoi[0].shift(x=1))
assert lsoi_aug[0].coords_almost_equals(lsoi[0].shift(x=1))
def test_localize_random_state(self):
aug = _DummyAugmenter()
aug_localized = aug.localize_random_state()
assert aug_localized is not aug
assert aug.random_state.is_global_rng()
assert not aug_localized.random_state.is_global_rng()
def test_seed_(self):
aug1 = _DummyAugmenter()
aug2 = _DummyAugmenter().to_deterministic()
aug0 = iaa.Sequential([aug1, aug2])
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.seed_()
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
def test_seed__deterministic_too(self):
aug1 = _DummyAugmenter()
aug2 = _DummyAugmenter().to_deterministic()
aug0 = iaa.Sequential([aug1, aug2])
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.seed_(deterministic_too=True)
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert not _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
def test_seed__with_integer(self):
aug1 = _DummyAugmenter()
aug2 = _DummyAugmenter().to_deterministic()
aug0 = iaa.Sequential([aug1, aug2])
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.seed_(123)
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0_copy.random_state, iarandom.RNG(123))
expected = iarandom.RNG(123).derive_rng_()
assert _same_rs(aug0_copy[0].random_state, expected)
def test_seed__with_rng(self):
aug1 = _DummyAugmenter()
aug2 = _DummyAugmenter().to_deterministic()
aug0 = iaa.Sequential([aug1, aug2])
aug0_copy = aug0.deepcopy()
assert _same_rs(aug0.random_state, aug0_copy.random_state)
assert _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
aug0_copy.seed_(iarandom.RNG(123))
assert not _same_rs(aug0.random_state, aug0_copy.random_state)
assert not _same_rs(aug0[0].random_state, aug0_copy[0].random_state)
assert _same_rs(aug0[1].random_state, aug0_copy[1].random_state)
assert _same_rs(aug0_copy.random_state,
iarandom.RNG(123))
expected = iarandom.RNG(123).derive_rng_()
assert _same_rs(aug0_copy[0].random_state, expected)
def test_get_parameters(self):
# test for "raise NotImplementedError"
aug = _DummyAugmenterCallsParent()
with self.assertRaises(NotImplementedError):
aug.get_parameters()
def test_get_all_children_flat(self):
aug1 = _DummyAugmenter()
aug21 = _DummyAugmenter()
aug2 = iaa.Sequential([aug21])
aug0 = iaa.Sequential([aug1, aug2])
children = aug0.get_all_children(flat=True)
assert isinstance(children, list)
assert children[0] == aug1
assert children[1] == aug2
assert children[2] == aug21
def test_get_all_children_not_flat(self):
aug1 = _DummyAugmenter()
aug21 = _DummyAugmenter()
aug2 = iaa.Sequential([aug21])
aug0 = iaa.Sequential([aug1, aug2])
children = aug0.get_all_children(flat=False)
assert isinstance(children, list)
assert children[0] == aug1
assert children[1] == aug2
assert isinstance(children[2], list)
assert children[2][0] == aug21
def test___repr___and___str__(self):
class DummyAugmenterRepr(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
return heatmaps
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
return keypoints_on_images
def get_parameters(self):
return ["A", "B", "C"]
aug1 = DummyAugmenterRepr(name="Example")
aug2 = DummyAugmenterRepr(name="Example").to_deterministic()
expected1 = (
"DummyAugmenterRepr("
"name=Example, parameters=[A, B, C], deterministic=False"
")")
expected2 = (
"DummyAugmenterRepr("
"name=Example, parameters=[A, B, C], deterministic=True"
")")
assert aug1.__repr__() == aug1.__str__() == expected1
assert aug2.__repr__() == aug2.__str__() == expected2
# -----------
# lambda functions used in the test class TestAugmenter_augment_batches
# in test method test_augment_batches_with_many_different_augmenters().
# They are here instead of in the test method, because otherwise there were
# issues with spawn mode not being able to pickle functions,
# see issue #414.
def _augment_batches__lambda_func_images(
images, random_state, parents, hooks):
return images
def _augment_batches__lambda_func_keypoints(
keypoints_on_images, random_state, parents, hooks):
return keypoints_on_images
def _augment_batches__assertlambda_func_images(
images, random_state, parents, hooks):
return True
def _augment_batches__assertlambda_func_keypoints(
keypoints_on_images, random_state, parents, hooks):
return True
# -----------
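# Tests for Augmenter.augment_batches(). Passing plain lists/arrays of images
# instead of ia.Batch/ia.UnnormalizedBatch instances is deprecated, hence the
# warning checks below; background=True augments in background processes and
# must still yield results aligned with the keypoints of each batch.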
class TestAugmenter_augment_batches(unittest.TestCase):
def setUp(self):
reseed()
def test_augment_batches_list_of_empty_list_deprecated(self):
with warnings.catch_warnings(record=True) as caught_warnings:
aug = _DummyAugmenter()
batches_aug = list(aug.augment_batches([[]]))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert isinstance(batches_aug[0], list)
assert len(caught_warnings) == 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_augment_batches_list_of_arrays_deprecated(self):
with warnings.catch_warnings(record=True) as caught_warnings:
aug = _DummyAugmenter()
image_batches = [np.zeros((1, 2, 2, 3), dtype=np.uint8)]
batches_aug = list(aug.augment_batches(image_batches))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert array_equal_lists(batches_aug, image_batches)
assert len(caught_warnings) == 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_augment_batches_list_of_list_of_arrays_deprecated(self):
with warnings.catch_warnings(record=True) as caught_warnings:
aug = _DummyAugmenter()
            image_batches = [[np.zeros((2, 2, 3), dtype=np.uint8),
                              np.zeros((2, 3, 3), dtype=np.uint8)]]
batches_aug = list(aug.augment_batches(image_batches))
assert isinstance(batches_aug, list)
assert len(batches_aug) == 1
assert array_equal_lists(batches_aug[0], image_batches[0])
assert len(caught_warnings) == 1
assert "deprecated" in str(caught_warnings[-1].message)
def test_augment_batches_invalid_datatype(self):
aug = _DummyAugmenter()
with self.assertRaises(Exception):
_ = list(aug.augment_batches(None))
def test_augment_batches_list_of_invalid_datatype(self):
aug = _DummyAugmenter()
got_exception = False
try:
_ = list(aug.augment_batches([None]))
except Exception as exc:
got_exception = True
assert "Unknown datatype of batch" in str(exc)
assert got_exception
def test_augment_batches_list_of_list_of_invalid_datatype(self):
aug = _DummyAugmenter()
got_exception = False
try:
_ = list(aug.augment_batches([[None]]))
except Exception as exc:
got_exception = True
assert "Unknown datatype in batch[0]" in str(exc)
assert got_exception
def test_augment_batches_batch_with_list_of_images(self):
image = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
image_flipped = np.fliplr(image)
keypoint = ia.Keypoint(x=2, y=1)
keypoints = [ia.KeypointsOnImage([keypoint], shape=image.shape + (1,))]
kp_flipped = ia.Keypoint(
x=image.shape[1]-keypoint.x,
y=keypoint.y
)
# basic functionality test (images as list)
for bg in [True, False]:
seq = iaa.Fliplr(1.0)
batches = [ia.Batch(images=[np.copy(image)], keypoints=keypoints)]
batches_aug = list(seq.augment_batches(batches, background=bg))
baug0 = batches_aug[0]
assert np.array_equal(baug0.images_aug[0], image_flipped)
assert baug0.keypoints_aug[0].keypoints[0].x == kp_flipped.x
assert baug0.keypoints_aug[0].keypoints[0].y == kp_flipped.y
assert np.array_equal(baug0.images_unaug[0], image)
assert baug0.keypoints_unaug[0].keypoints[0].x == keypoint.x
assert baug0.keypoints_unaug[0].keypoints[0].y == keypoint.y
def test_augment_batches_batch_with_array_of_images(self):
image = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
image_flipped = np.fliplr(image)
keypoint = ia.Keypoint(x=2, y=1)
keypoints = [ia.KeypointsOnImage([keypoint], shape=image.shape + (1,))]
kp_flipped = ia.Keypoint(
x=image.shape[1]-keypoint.x,
y=keypoint.y
)
# basic functionality test (images as array)
for bg in [True, False]:
seq = iaa.Fliplr(1.0)
batches = [ia.Batch(images=np.uint8([np.copy(image)]),
keypoints=keypoints)]
batches_aug = list(seq.augment_batches(batches, background=bg))
baug0 = batches_aug[0]
assert np.array_equal(baug0.images_aug, np.uint8([image_flipped]))
assert baug0.keypoints_aug[0].keypoints[0].x == kp_flipped.x
assert baug0.keypoints_aug[0].keypoints[0].y == kp_flipped.y
assert np.array_equal(baug0.images_unaug, np.uint8([image]))
assert baug0.keypoints_unaug[0].keypoints[0].x == keypoint.x
assert baug0.keypoints_unaug[0].keypoints[0].y == keypoint.y
def test_augment_batches_background(self):
image = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
image_flipped = np.fliplr(image)
kps = ia.Keypoint(x=2, y=1)
kpsoi = ia.KeypointsOnImage([kps], shape=image.shape + (1,))
kp_flipped = ia.Keypoint(
x=image.shape[1]-kps.x,
y=kps.y
)
seq = iaa.Fliplr(0.5)
for bg, as_array in itertools.product([False, True], [False, True]):
# with images as list
nb_flipped_images = 0
nb_flipped_keypoints = 0
nb_iterations = 1000
images = (
np.uint8([np.copy(image)])
if as_array
else [np.copy(image)])
batches = [
ia.Batch(images=images,
keypoints=[kpsoi.deepcopy()])
for _ in sm.xrange(nb_iterations)
]
batches_aug = list(seq.augment_batches(batches, background=bg))
for batch_aug in batches_aug:
image_aug = batch_aug.images_aug[0]
keypoint_aug = batch_aug.keypoints_aug[0].keypoints[0]
img_matches_unflipped = np.array_equal(image_aug, image)
img_matches_flipped = np.array_equal(image_aug, image_flipped)
assert img_matches_unflipped or img_matches_flipped
if img_matches_flipped:
nb_flipped_images += 1
kp_matches_unflipped = (
np.isclose(keypoint_aug.x, kps.x)
and np.isclose(keypoint_aug.y, kps.y))
kp_matches_flipped = (
np.isclose(keypoint_aug.x, kp_flipped.x)
and np.isclose(keypoint_aug.y, kp_flipped.y))
assert kp_matches_flipped or kp_matches_unflipped
if kp_matches_flipped:
nb_flipped_keypoints += 1
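# Fliplr(0.5) flips each image independently with probability 0.5, so
# roughly half of the images (and, via alignment, their keypoints)
# should end up flipped.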
assert 0.4*nb_iterations <= nb_flipped_images <= 0.6*nb_iterations
assert nb_flipped_images == nb_flipped_keypoints
def test_augment_batches_with_many_different_augmenters(self):
image = np.array([[0, 0, 1, 1, 1],
[0, 0, 1, 1, 1],
[0, 1, 1, 1, 1]], dtype=np.uint8)
keypoint = ia.Keypoint(x=2, y=1)
keypoints = [ia.KeypointsOnImage([keypoint], shape=image.shape + (1,))]
augs = [
iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
iaa.Sometimes(1.0, iaa.Fliplr(1)),
iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
iaa.WithChannels([0], iaa.Add((-50, 50))),
iaa.Identity(name="Identity-nochange"),
iaa.Lambda(
func_images=_augment_batches__lambda_func_images,
func_keypoints=_augment_batches__lambda_func_keypoints,
name="Lambda-nochange"
),
iaa.AssertLambda(
func_images=_augment_batches__assertlambda_func_images,
func_keypoints=_augment_batches__assertlambda_func_keypoints,
name="AssertLambda-nochange"
),
iaa.AssertShape(
(None, 64, 64, 3),
check_keypoints=False,
name="AssertShape-nochange"
),
iaa.Resize((0.5, 0.9)),
iaa.CropAndPad(px=(-50, 50)),
iaa.Pad(px=(1, 50)),
iaa.Crop(px=(1, 50)),
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
iaa.ChangeColorspace(to_colorspace="GRAY"),
iaa.Grayscale(alpha=(0.1, 1.0)),
iaa.GaussianBlur(1.0),
iaa.AverageBlur(5),
iaa.MedianBlur(5),
iaa.Convolve(np.array([[0, 1, 0],
[1, -4, 1],
[0, 1, 0]])),
iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
iaa.EdgeDetect(alpha=(0.1, 1.0)),
iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
iaa.Add((-50, 50)),
iaa.AddElementwise((-50, 50)),
iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
iaa.Multiply((0.6, 1.4)),
iaa.MultiplyElementwise((0.6, 1.4)),
iaa.Dropout((0.3, 0.5)),
iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
iaa.Invert(0.5),
iaa.Affine(
scale=(0.7, 1.3),
translate_percent=(-0.1, 0.1),
rotate=(-20, 20),
shear=(-20, 20),
order=ia.ALL,
mode=ia.ALL,
cval=(0, 255)),
iaa.PiecewiseAffine(scale=(0.1, 0.3)),
iaa.ElasticTransformation(alpha=2.0)
]
nb_iterations = 100
image = ia.quokka(size=(64, 64))
batches = [ia.Batch(images=[np.copy(image)],
keypoints=[keypoints[0].deepcopy()])
for _ in sm.xrange(nb_iterations)]
for aug in augs:
nb_changed = 0
batches_aug = list(aug.augment_batches(batches, background=True))
for batch_aug in batches_aug:
image_aug = batch_aug.images_aug[0]
if (image.shape != image_aug.shape
or not np.array_equal(image, image_aug)):
nb_changed += 1
if nb_changed > 10:
break
if "-nochange" not in aug.name:
assert nb_changed > 0
else:
assert nb_changed == 0
class TestAugmenter_augment_batch(unittest.TestCase):
def test_deprecation(self):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
aug = _InplaceDummyAugmenterImgsArray(1)
batch = ia.UnnormalizedBatch(
images=np.zeros((1, 1, 1, 3), dtype=np.uint8))
_batch_aug = aug.augment_batch(batch)
assert len(caught_warnings) == 1
assert "is deprecated" in str(caught_warnings[0].message)
def test_augments_correctly_images(self):
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
aug = _InplaceDummyAugmenterImgsArray(1)
batch = ia.UnnormalizedBatch(images=images)
batch_aug = aug.augment_batch(batch)
image_unaug = batch_aug.images_unaug[0, :, :, :]
image_aug = batch_aug.images_aug[0, :, :, :]
assert batch_aug is batch
assert batch_aug.images_aug is not batch.images_unaug
assert batch_aug.images_aug is not batch_aug.images_unaug
assert np.array_equal(image, image_cp)
assert np.array_equal(image_unaug, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
class TestAugmenter_augment_batch_(unittest.TestCase):
def setUp(self):
reseed()
def test_verify_inplace_aug__imgs__unnormalized_batch(self):
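# _InplaceDummyAugmenterImgsArray(1) is a helper defined elsewhere in this
# test module; judging by the assertions below it adds 1 to image arrays
# in-place. The test verifies that batch objects copy their inputs before
# augmenting, so the original image stays untouched.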
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
aug = _InplaceDummyAugmenterImgsArray(1)
batch = ia.UnnormalizedBatch(images=images)
batch_aug = aug.augment_batch_(batch)
image_unaug = batch_aug.images_unaug[0, :, :, :]
image_aug = batch_aug.images_aug[0, :, :, :]
assert batch_aug is batch
assert batch_aug.images_aug is not batch.images_unaug
assert batch_aug.images_aug is not batch_aug.images_unaug
assert np.array_equal(image, image_cp)
assert np.array_equal(image_unaug, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
def test_verify_inplace_aug__imgs__normalized_batch(self):
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
aug = _InplaceDummyAugmenterImgsArray(1)
batch = ia.Batch(images=images)
batch_aug = aug.augment_batch_(batch)
image_unaug = batch_aug.images_unaug[0, :, :, :]
image_aug = batch_aug.images_aug[0, :, :, :]
assert batch_aug is batch
assert batch_aug.images_aug is not batch.images_unaug
assert batch_aug.images_aug is not batch_aug.images_unaug
assert np.array_equal(image, image_cp)
assert np.array_equal(image_unaug, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
def test_verify_inplace_aug__imgs__batchinaug(self):
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
aug = _InplaceDummyAugmenterImgsArray(1)
batch = _BatchInAugmentation(images=images)
batch_aug = aug.augment_batch_(batch)
image_aug = batch_aug.images[0, :, :, :]
assert batch_aug is batch
assert batch_aug.images is batch.images
assert not np.array_equal(image, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
def test_verify_inplace_aug__segmaps__normalized_batch(self):
segmap_arr = np.zeros((10, 20, 3), dtype=np.int32)
segmap_arr[3:6, 3:9] = 1
segmap = ia.SegmentationMapsOnImage(segmap_arr, shape=(10, 20, 3))
segmap_cp = ia.SegmentationMapsOnImage(np.copy(segmap_arr),
shape=(10, 20, 3))
aug = _InplaceDummyAugmenterSegMaps(1)
batch = ia.Batch(segmentation_maps=[segmap])
batch_aug = aug.augment_batch_(batch)
segmap_unaug = batch_aug.segmentation_maps_unaug[0]
segmap_aug = batch_aug.segmentation_maps_aug[0]
assert batch_aug is batch
assert (batch_aug.segmentation_maps_aug
is not batch.segmentation_maps_unaug)
assert (batch_aug.segmentation_maps_aug
is not batch_aug.segmentation_maps_unaug)
assert np.array_equal(segmap.get_arr(), segmap_cp.get_arr())
assert np.array_equal(segmap_unaug.get_arr(), segmap_cp.get_arr())
assert np.array_equal(segmap_aug.get_arr(), segmap_cp.get_arr() + 1)
def test_verify_inplace_aug__keypoints_normalized_batch(self):
kpsoi = ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=(10, 20, 3))
kpsoi_cp = ia.KeypointsOnImage([ia.Keypoint(x=1, y=2)],
shape=(10, 20, 3))
aug = _InplaceDummyAugmenterKeypoints(x=1, y=3)
batch = ia.Batch(keypoints=[kpsoi])
batch_aug = aug.augment_batch_(batch)
kpsoi_unaug = batch_aug.keypoints_unaug[0]
kpsoi_aug = batch_aug.keypoints_aug[0]
assert batch_aug is batch
assert (batch_aug.keypoints_aug
is not batch.keypoints_unaug)
assert (batch_aug.keypoints_aug
is not batch_aug.keypoints_unaug)
assert np.allclose(kpsoi.to_xy_array(), kpsoi_cp.to_xy_array())
assert np.allclose(kpsoi_unaug.to_xy_array(), kpsoi_cp.to_xy_array())
assert np.allclose(kpsoi_aug.to_xy_array()[:, 0],
kpsoi_cp.to_xy_array()[:, 0] + 1)
assert np.allclose(kpsoi_aug.to_xy_array()[:, 1],
kpsoi_cp.to_xy_array()[:, 1] + 3)
def test_call_changes_global_rng_state(self):
state_before = copy.deepcopy(iarandom.get_global_rng().state)
aug = iaa.Rot90(k=(0, 3))
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
batch = ia.UnnormalizedBatch(images=[image])
_batch_aug = aug.augment_batch_(batch)
state_after = iarandom.get_global_rng().state
assert repr(state_before) != repr(state_after)
def test_multiple_calls_produce_not_the_same_results(self):
aug = iaa.Rot90(k=(0, 3))
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
nb_images = 1000
batch1 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch2 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch3 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch_aug1 = aug.augment_batch_(batch1)
batch_aug2 = aug.augment_batch_(batch2)
batch_aug3 = aug.augment_batch_(batch3)
assert batch_aug1 is not batch_aug2
assert batch_aug1 is not batch_aug3
assert batch_aug2 is not batch_aug3
nb_equal = [0, 0, 0]
for image_aug1, image_aug2, image_aug3 in zip(batch_aug1.images_aug,
batch_aug2.images_aug,
batch_aug3.images_aug):
nb_equal[0] += int(np.array_equal(image_aug1, image_aug2))
nb_equal[1] += int(np.array_equal(image_aug1, image_aug3))
nb_equal[2] += int(np.array_equal(image_aug2, image_aug3))
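# Rot90(k=(0, 3)) samples one of four distinct rotations uniformly, so two
# independent runs should produce identical images in roughly 25% of cases;
# allow 10% tolerance on top of that.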
assert nb_equal[0] < (0.25 + 0.1) * nb_images
assert nb_equal[1] < (0.25 + 0.1) * nb_images
assert nb_equal[2] < (0.25 + 0.1) * nb_images
def test_calls_affect_other_augmenters_with_global_rng(self):
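# Both augmenters draw from the global RNG. Running aug1 before aug2
# advances that RNG, so after re-seeding, repeating the exact same calls
# must reproduce aug2's output, while skipping aug1 must change it.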
# with calling aug1
iarandom.seed(1)
aug1 = iaa.Rot90(k=(0, 3))
aug2 = iaa.Add((0, 255))
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
nb_images = 50
batch1 = ia.UnnormalizedBatch(images=[image] * 1)
batch2 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch_aug11 = aug1.augment_batch_(batch1)
batch_aug12 = aug2.augment_batch_(batch2)
# with calling aug1, repetition (to see that seed() works)
iarandom.seed(1)
aug1 = iaa.Rot90(k=(0, 3))
aug2 = iaa.Add((0, 255))
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
nb_images = 50
batch1 = ia.UnnormalizedBatch(images=[image] * 1)
batch2 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch_aug21 = aug1.augment_batch_(batch1)
batch_aug22 = aug2.augment_batch_(batch2)
# without calling aug1
iarandom.seed(1)
aug2 = iaa.Add((0, 255))
image = np.arange(4*4).astype(np.uint8).reshape((4, 4))
nb_images = 50
batch2 = ia.UnnormalizedBatch(images=[image] * nb_images)
batch_aug32 = aug2.augment_batch_(batch2)
# comparison
assert np.array_equal(
np.array(batch_aug12.images_aug, dtype=np.uint8),
np.array(batch_aug22.images_aug, dtype=np.uint8)
)
assert not np.array_equal(
np.array(batch_aug12.images_aug, dtype=np.uint8),
np.array(batch_aug32.images_aug, dtype=np.uint8)
)
class TestAugmenter_augment_segmentation_maps(unittest.TestCase):
def setUp(self):
reseed()
def test_augment_segmentation_maps_single_instance(self):
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Identity()
segmap_aug = aug.augment_segmentation_maps(segmap)
assert np.array_equal(segmap_aug.arr, segmap.arr)
def test_augment_segmentation_maps_list_of_single_instance(self):
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Identity()
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
assert np.array_equal(segmap_aug.arr, segmap.arr)
def test_augment_segmentation_maps_affine(self):
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Affine(translate_px={"x": 1})
segmap_aug = aug.augment_segmentation_maps(segmap)
expected = np.int32([
[0, 0, 1],
[0, 0, 1],
[0, 0, 1]
])
expected = expected[:, :, np.newaxis]
assert np.array_equal(segmap_aug.arr, expected)
def test_augment_segmentation_maps_pad(self):
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
segmap_aug = aug.augment_segmentation_maps(segmap)
expected = np.int32([
[0, 0, 0],
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
expected = expected[:, :, np.newaxis]
assert np.array_equal(segmap_aug.arr, expected)
def test_augment_segmentation_maps_pad_some_classes_not_provided(self):
# only classes 0 and 3
arr = np.int32([
[0, 3, 3],
[0, 3, 3],
[0, 3, 3]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
segmap_aug = aug.augment_segmentation_maps(segmap)
expected = np.int32([
[0, 0, 0],
[0, 3, 3],
[0, 3, 3],
[0, 3, 3]
])
expected = expected[:, :, np.newaxis]
assert np.array_equal(segmap_aug.arr, expected)
def test_augment_segmentation_maps_pad_only_background_class(self):
# only class 0
arr = np.int32([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
])
segmap = ia.SegmentationMapsOnImage(arr, shape=(3, 3))
aug = iaa.Pad(px=(1, 0, 0, 0), keep_size=False)
segmap_aug = aug.augment_segmentation_maps(segmap)
expected = np.int32([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
])
expected = expected[:, :, np.newaxis]
assert np.array_equal(segmap_aug.arr, expected)
def test_augment_segmentation_maps_multichannel_rot90(self):
segmap = ia.SegmentationMapsOnImage(
np.arange(0, 4*4).reshape((4, 4, 1)).astype(np.int32),
shape=(4, 4, 3)
)
aug = iaa.Rot90(1, keep_size=False)
segmaps_aug = aug.augment_segmentation_maps([segmap, segmap, segmap])
for i in range(3):
assert np.allclose(segmaps_aug[i].arr, np.rot90(segmap.arr, -1))
class TestAugmenter_draw_grid(unittest.TestCase):
def setUp(self):
reseed()
def test_draw_grid_list_of_3d_arrays(self):
# list, shape (3, 3, 3)
aug = _DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
grid = aug.draw_grid([image], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
def test_draw_grid_list_of_2d_arrays(self):
# list, shape (3, 3)
aug = _DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
grid = aug.draw_grid([image[..., 0]], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image[..., 0:1], image[..., 0:1]]),
np.hstack([image[..., 0:1], image[..., 0:1]])
])
grid_expected = np.tile(grid_expected, (1, 1, 3))
assert np.array_equal(grid, grid_expected)
def test_draw_grid_list_of_1d_arrays_fails(self):
# list, shape (2,)
aug = _DummyAugmenter()
with self.assertRaises(Exception):
_ = aug.draw_grid([np.zeros((2,), dtype=np.uint8)], rows=2, cols=2)
def test_draw_grid_4d_array(self):
# array, shape (1, 3, 3, 3)
aug = _DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
grid = aug.draw_grid(np.uint8([image]), rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
def test_draw_grid_3d_array(self):
# array, shape (3, 3, 3)
aug = _DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
grid = aug.draw_grid(image, rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, grid_expected)
def test_draw_grid_2d_array(self):
# array, shape (3, 3)
aug = _DummyAugmenter()
image = np.zeros((3, 3, 3), dtype=np.uint8)
image[0, 0, :] = 10
image[0, 1, :] = 50
image[1, 1, :] = 255
grid = aug.draw_grid(image[..., 0], rows=2, cols=2)
grid_expected = np.vstack([
np.hstack([image[..., 0:1], image[..., 0:1]]),
np.hstack([image[..., 0:1], image[..., 0:1]])
])
grid_expected = np.tile(grid_expected, (1, 1, 3))
assert np.array_equal(grid, grid_expected)
def test_draw_grid_1d_array(self):
# array, shape (2,)
aug = _DummyAugmenter()
with self.assertRaises(Exception):
_ = aug.draw_grid(np.zeros((2,), dtype=np.uint8), rows=2, cols=2)
@six.add_metaclass(ABCMeta)
class _TestAugmenter_augment_cbaois(object):
"""Class that is used to test augment_polygons() and augment_line_strings().
Originally this was only used for polygons and then made more flexible.
This is why some descriptions are still geared towards polygons.
Abbreviations:
cba = coordinate based augmentable, e.g. Polygon
cbaoi = coordinate based augmentable on image, e.g. PolygonsOnImage
"""
def setUp(self):
reseed()
@abstractmethod
def _augfunc(self, augmenter, *args, **kwargs):
"""Return augmenter.augment_*(...)."""
@property
@abstractmethod
def _ObjClass(self):
"""Return Polygon, LineString or similar class."""
@property
@abstractmethod
def _ObjOnImageClass(self):
"""Return PolygonsOnImage, LineStringsOnImage or similar class."""
def _Obj(self, *args, **kwargs):
return self._ObjClass(*args, **kwargs)
def _ObjOnImage(self, *args, **kwargs):
return self._ObjOnImageClass(*args, **kwargs)
def _compare_coords_of_cba(self, observed, expected, atol=1e-4, rtol=0):
return np.allclose(observed, expected, atol=atol, rtol=rtol)
def test_single_empty_instance(self):
# single instance of PolygonsOnImage with 0 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaoi = self._ObjOnImage([], shape=(10, 11, 3))
cbaoi_aug = self._augfunc(aug, cbaoi)
assert isinstance(cbaoi_aug, self._ObjOnImageClass)
assert cbaoi_aug.empty
assert cbaoi_aug.shape == (11, 10, 3)
def test_list_of_single_empty_instance(self):
# list of PolygonsOnImage with 0 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaoi = self._ObjOnImage([], shape=(10, 11, 3))
cbaois_aug = self._augfunc(aug, [cbaoi])
assert isinstance(cbaois_aug, list)
assert isinstance(cbaois_aug[0], self._ObjOnImageClass)
assert cbaois_aug[0].empty
assert cbaois_aug[0].shape == (11, 10, 3)
def test_two_cbaois_each_two_cbas(self):
# 2 PolygonsOnImage, each 2 polygons
aug = iaa.Rot90(1, keep_size=False)
cbaois = [
self._ObjOnImage(
[self._Obj([(0, 0), (5, 0), (5, 5)]),
self._Obj([(1, 1), (6, 1), (6, 6)])],
shape=(10, 10, 3)),
self._ObjOnImage(
[self._Obj([(2, 2), (7, 2), (7, 7)]),
self._Obj([(3, 3), (8, 3), (8, 8)])],
shape=(10, 10, 3)),
]
cbaois_aug = self._augfunc(aug, cbaois)
assert isinstance(cbaois_aug, list)
assert isinstance(cbaois_aug[0], self._ObjOnImageClass)
assert isinstance(cbaois_aug[1], self._ObjOnImageClass)
assert len(cbaois_aug[0].items) == 2
assert len(cbaois_aug[1].items) == 2
kp_offset = 0
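# expected coordinates below: Rot90(1, keep_size=False) maps a point
# (x, y) on an image of height H to (H - y + kp_offset, x); kp_offset
# is 0 here.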
assert self._compare_coords_of_cba(
cbaois_aug[0].items[0].coords,
[(10-0+kp_offset, 0), (10-0+kp_offset, 5), (10-5+kp_offset, 5)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[0].items[1].coords,
[(10-1+kp_offset, 1), (10-1+kp_offset, 6), (10-6+kp_offset, 6)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[1].items[0].coords,
[(10-2+kp_offset, 2), (10-2+kp_offset, 7), (10-7+kp_offset, 7)],
atol=1e-4, rtol=0
)
assert self._compare_coords_of_cba(
cbaois_aug[1].items[1].coords,
[(10-3+kp_offset, 3), (10-3+kp_offset, 8), (10-8+kp_offset, 8)],
atol=1e-4, rtol=0
)
assert cbaois_aug[0].shape == (10, 10, 3)
assert cbaois_aug[1].shape == (10, 10, 3)
def test_randomness_between_and_within_batches(self):
# test whether there is randomness within each batch and between
# batches
aug = iaa.Rot90((0, 3), keep_size=False)
cba = self._Obj([(0, 0), (5, 0), (5, 5)])
cbaoi = self._ObjOnImage(
[cba.deepcopy() for _ in sm.xrange(1)],
shape=(10, 11, 3)
)
cbaois = [cbaoi.deepcopy() for _ in sm.xrange(100)]
cbaois_aug1 = self._augfunc(aug, cbaois)
cbaois_aug2 = self._augfunc(aug, cbaois)
# --> different between runs
cbas1 = [cba
for cbaoi in cbaois_aug1
for cba in cbaoi.items]
cbas2 = [cba
for cbaoi in cbaois_aug2
for cba in cbaoi.items]
assert len(cbas1) == len(cbas2)
same = []
for cba1, cba2 in zip(cbas1, cbas2):
points1 = np.float32(cba1.coords)
points2 = np.float32(cba2.coords)
same.append(self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0))
assert not np.all(same)
# --> different between PolygonsOnImages
same = []
points1 = np.float32([cba.coords
for cba
in cbaois_aug1[0].items])
for cbaoi in cbaois_aug1[1:]:
points2 = np.float32([cba.coords
for cba
in cbaoi.items])
same.append(self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0))
assert not np.all(same)
# --> different between polygons
points1 = set()
for cba in cbaois_aug1[0].items:
for point in cba.coords:
points1.add(tuple(
[int(point[0]*10), int(point[1]*10)]
))
assert len(points1) > 1
def test_determinism(self):
aug = iaa.Rot90((0, 3), keep_size=False)
aug_det = aug.to_deterministic()
cba = self._Obj([(0, 0), (5, 0), (5, 5)])
cbaoi = self._ObjOnImage(
[cba.deepcopy() for _ in sm.xrange(1)],
shape=(10, 11, 3)
)
cbaois = [cbaoi.deepcopy() for _ in sm.xrange(100)]
cbaois_aug1 = self._augfunc(aug_det, cbaois)
cbaois_aug2 = self._augfunc(aug_det, cbaois)
# --> different between PolygonsOnImages
same = []
points1 = np.float32([cba.coords
for cba
in cbaois_aug1[0].items])
for cbaoi in cbaois_aug1[1:]:
points2 = np.float32([cba.coords
for cba
in cbaoi.items])
same.append(self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0))
assert not np.all(same)
# --> similar between augmentation runs
cbas1 = [cba
for cbaoi in cbaois_aug1
for cba in cbaoi.items]
cbas2 = [cba
for cbaoi in cbaois_aug2
for cba in cbaoi.items]
assert len(cbas1) == len(cbas2)
for cba1, cba2 in zip(cbas1, cbas2):
points1 = np.float32(cba1.coords)
points2 = np.float32(cba2.coords)
assert self._compare_coords_of_cba(points1, points2,
atol=1e-2, rtol=0)
def test_aligned_with_images(self):
aug = iaa.Rot90((0, 3), keep_size=False)
aug_det = aug.to_deterministic()
image = np.zeros((10, 20), dtype=np.uint8)
image[5, :] = 255
image[2:5, 10] = 255
image_rots = [iaa.Rot90(k, keep_size=False).augment_image(image)
for k in [0, 1, 2, 3]]
cba = self._Obj([(0, 0), (10, 0), (10, 20)])
kp_offs = 0 # offset
cbas_rots = [
[(0, 0), (10, 0), (10, 20)],
[(10-0+kp_offs, 0), (10-0+kp_offs, 10), (10-20+kp_offs, 10)],
[(20-0+kp_offs, 10), (20-10+kp_offs, 10), (20-10+kp_offs, -10)],
[(10-10+kp_offs, 20), (10-10+kp_offs, 10), (10-(-10)+kp_offs, 10)]
]
cbaois = [self._ObjOnImage([cba], shape=image.shape)
for _ in sm.xrange(50)]
images_aug = aug_det.augment_images([image] * 50)
cbaois_aug = self._augfunc(aug_det, cbaois)
seen = set()
for image_aug, cbaoi_aug in zip(images_aug, cbaois_aug):
found_image = False
for img_rot_idx, img_rot in enumerate(image_rots):
if (image_aug.shape == img_rot.shape
and np.allclose(image_aug, img_rot)):
found_image = True
break
found_cba = False
for poly_rot_idx, cba_rot in enumerate(cbas_rots):
coords_observed = cbaoi_aug.items[0].coords
if self._compare_coords_of_cba(coords_observed, cba_rot):
found_cba = True
break
assert found_image
assert found_cba
assert img_rot_idx == poly_rot_idx
seen.add((img_rot_idx, poly_rot_idx))
assert 2 <= len(seen) <= 4 # assert not always the same rot
def test_aligned_with_images_despite_empty_instances(self):
# Test that augmenting lists of e.g. PolygonsOnImage instances stays
# aligned with image augmentation when one of the instances is empty
# (i.e. contains no polygons)
cba = self._Obj([(0, 0), (5, 0), (5, 5), (0, 5)])
cbaoi_lst = [
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.shift(x=1)], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([], shape=(1, 8)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.shift(x=1)], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20)),
self._ObjOnImage([cba.deepcopy()], shape=(10, 20))
]
image = np.zeros((10, 20), dtype=np.uint8)
image[0, 0] = 255
image[0, 5] = 255
image[5, 5] = 255
image[5, 0] = 255
images = np.tile(image[np.newaxis, :, :], (len(cbaoi_lst), 1, 1))
aug = iaa.Affine(translate_px={"x": (0, 8)}, order=0, mode="constant",
cval=0)
for _ in sm.xrange(10):
for is_list in [False, True]:
aug_det = aug.to_deterministic()
inputs = images
if is_list:
inputs = list(inputs)
images_aug = aug_det.augment_images(inputs)
cbaoi_aug_lst = self._augfunc(aug_det, cbaoi_lst)
if is_list:
images_aug = np.array(images_aug, dtype=np.uint8)
translations_imgs = np.argmax(images_aug[:, 0, :], axis=1)
translations_points = [
(cbaoi.items[0].coords[0][0] if not cbaoi.empty else None)
for cbaoi
in cbaoi_aug_lst]
assert len([
pointresult for
pointresult
in translations_points
if pointresult is None
]) == 1
assert translations_points[5] is None
translations_imgs = np.concatenate(
[translations_imgs[0:5], translations_imgs[6:]])
translations_points = np.array(
translations_points[0:5] + translations_points[6:],
dtype=translations_imgs.dtype)
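# instances 2 and 8 were created via cba.shift(x=1), i.e. pre-shifted by
# 1px; subtract that extra pixel (index 8 becomes 7 after removing the
# empty instance at index 5)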
translations_points[2] -= 1
translations_points[8-1] -= 1
assert np.array_equal(translations_imgs, translations_points)
# This is the same as _ConcavePolygonRecoverer, but we make sure that we
# always sample random values. This is done to advance the state of
# random_state and to ensure that doing so does not break the alignment.
class _DummyRecoverer(_ConcavePolygonRecoverer):
def recover_from(self, new_exterior, old_polygon, random_state=0):
# sample lots of values to ensure that the RNG is advanced
_ = random_state.integers(0, 2**30, 100)
return super(_DummyRecoverer, self).recover_from(
new_exterior, old_polygon, random_state=random_state)
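# Augmenter that augments polygons via their keypoints. Image augmentation
# is a no-op; the keypoint augmentation ignores its input and returns a
# fixed polygon that is invalid (not concave) on odd calls and valid on
# even calls. Used to test the polygon recoverer and the alignment between
# image and polygon augmentation.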
class _DummyAugmenterWithRecoverer(iaa.Augmenter):
def __init__(self, use_recoverer=True):
super(_DummyAugmenterWithRecoverer, self).__init__()
self.random_samples_images = []
self.random_samples_kps = []
if use_recoverer:
self.recoverer = _DummyRecoverer()
else:
self.recoverer = None
def _augment_images(self, images, random_state, parents, hooks):
sample = random_state.integers(0, 2**30)
self.random_samples_images.append(sample)
return images
def _augment_polygons(self, polygons_on_images, random_state, parents,
hooks):
return self._augment_polygons_as_keypoints(
polygons_on_images, random_state, parents, hooks,
recoverer=self.recoverer)
def _augment_keypoints(self, keypoints_on_images, random_state, parents,
hooks):
sample = random_state.integers(0, 2**30)
self.random_samples_kps.append(sample)
assert len(keypoints_on_images) in [1, 2]
assert len(keypoints_on_images[0].keypoints) == 7
result = []
for _ in keypoints_on_images:
# every second call of _augment_polygons() returns a valid (concave)
# polygon, all other calls an invalid (not concave) one
if len(self.random_samples_kps) % 2 == 1:
# not concave
kpsoi = ia.KeypointsOnImage([
ia.Keypoint(x=0, y=0),
ia.Keypoint(x=10, y=0),
ia.Keypoint(x=10, y=4),
ia.Keypoint(x=-1, y=5),
ia.Keypoint(x=10, y=6),
ia.Keypoint(x=10, y=10),
ia.Keypoint(x=0, y=10)
], shape=(10, 10, 3))
else:
# concave
kpsoi = ia.KeypointsOnImage([
ia.Keypoint(x=0, y=0),
ia.Keypoint(x=10, y=0),
ia.Keypoint(x=10, y=4),
ia.Keypoint(x=10, y=5),
ia.Keypoint(x=10, y=6),
ia.Keypoint(x=10, y=10),
ia.Keypoint(x=0, y=10)
], shape=(10, 10, 3))
result.append(kpsoi)
return result
def get_parameters(self):
return []
class TestAugmenter_augment_polygons(_TestAugmenter_augment_cbaois,
unittest.TestCase):
def _augfunc(self, augmenter, *args, **kwargs):
return augmenter.augment_polygons(*args, **kwargs)
@property
def _ObjClass(self):
return ia.Polygon
@property
def _ObjOnImageClass(self):
return ia.PolygonsOnImage
def _coords(self, obj):
return obj.exterior
def _entities(self, obj_on_image):
return obj_on_image.polygons
def test_polygon_recoverer(self):
# This is mostly a dummy polygon. The augmenter always returns the
# same non-concave polygon.
poly = ia.Polygon([(0, 0), (10, 0),
(10, 4), (10, 5), (10, 6),
(10, 10), (0, 10)])
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
aug = _DummyAugmenterWithRecoverer()
psoi_aug = aug.augment_polygons(psoi)
poly_aug = psoi_aug.polygons[0]
bb = ia.BoundingBox(x1=0, y1=0, x2=10, y2=10)
bb_aug = ia.BoundingBox(
x1=np.min(poly_aug.exterior[:, 0]),
y1=np.min(poly_aug.exterior[:, 1]),
x2=np.max(poly_aug.exterior[:, 0]),
y2=np.max(poly_aug.exterior[:, 1])
)
assert bb.iou(bb_aug) > 0.9
assert psoi_aug.polygons[0].is_valid
def test_polygon_aligned_without_recoverer(self):
# This is mostly a dummy polygon. The augmenter always returns the
# same non-concave polygon.
poly = ia.Polygon([(0, 0), (10, 0),
(10, 4), (10, 5), (10, 6),
(10, 10), (0, 10)])
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
image = np.zeros((10, 10, 3))
aug = _DummyAugmenterWithRecoverer(use_recoverer=False)
images_aug1, psois_aug1 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug2, psois_aug2 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug3, psois_aug3 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug4, psois_aug4 = aug(images=[image, image],
polygons=[psoi, psoi])
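# without a recoverer, the invalid polygon returned on every odd call of
# the dummy augmenter is passed through unchanged, hence the alternating
# valid/invalid pattern below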
assert not psois_aug1[0].polygons[0].is_valid
assert not psois_aug1[1].polygons[0].is_valid
assert psois_aug2[0].polygons[0].is_valid
assert psois_aug2[1].polygons[0].is_valid
assert not psois_aug3[0].polygons[0].is_valid
assert not psois_aug3[1].polygons[0].is_valid
assert psois_aug4[0].polygons[0].is_valid
assert psois_aug4[1].polygons[0].is_valid
assert aug.random_samples_images == aug.random_samples_kps
def test_polygon_aligned_with_recoverer(self):
# This is mostly a dummy polygon. The augmenter always returns the
# same non-concave polygon.
poly = ia.Polygon([(0, 0), (10, 0),
(10, 4), (10, 5), (10, 6),
(10, 10), (0, 10)])
psoi = ia.PolygonsOnImage([poly], shape=(10, 10, 3))
image = np.zeros((10, 10, 3))
aug = _DummyAugmenterWithRecoverer(use_recoverer=True)
images_aug1, psois_aug1 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug2, psois_aug2 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug3, psois_aug3 = aug(images=[image, image],
polygons=[psoi, psoi])
images_aug4, psois_aug4 = aug(images=[image, image],
polygons=[psoi, psoi])
assert psois_aug1[0].polygons[0].is_valid
assert psois_aug1[1].polygons[0].is_valid
assert psois_aug2[0].polygons[0].is_valid
assert psois_aug2[1].polygons[0].is_valid
assert psois_aug3[0].polygons[0].is_valid
assert psois_aug3[1].polygons[0].is_valid
assert psois_aug4[0].polygons[0].is_valid
assert psois_aug4[1].polygons[0].is_valid
assert aug.random_samples_images == aug.random_samples_kps
class TestAugmenter_augment_line_strings(_TestAugmenter_augment_cbaois,
unittest.TestCase):
def _augfunc(self, augmenter, *args, **kwargs):
return augmenter.augment_line_strings(*args, **kwargs)
@property
def _ObjClass(self):
return ia.LineString
@property
def _ObjOnImageClass(self):
return ia.LineStringsOnImage
class TestAugmenter_augment_bounding_boxes(_TestAugmenter_augment_cbaois,
unittest.TestCase):
def _augfunc(self, augmenter, *args, **kwargs):
return augmenter.augment_bounding_boxes(*args, **kwargs)
@property
def _ObjClass(self):
return ia.BoundingBox
@property
def _ObjOnImageClass(self):
return ia.BoundingBoxesOnImage
def _Obj(self, *args, **kwargs):
assert len(args) == 1
coords = np.float32(args[0]).reshape((-1, 2))
x1 = np.min(coords[:, 0])
y1 = np.min(coords[:, 1])
x2 = np.max(coords[:, 0])
y2 = np.max(coords[:, 1])
return self._ObjClass(x1=x1, y1=y1, x2=x2, y2=y2, **kwargs)
def _compare_coords_of_cba(self, observed, expected, atol=1e-4, rtol=0):
observed = np.float32(observed).reshape((-1, 2))
expected = np.float32(expected).reshape((-1, 2))
assert observed.shape[0] == 2
assert expected.shape[1] == 2
obs_x1 = np.min(observed[:, 0])
obs_y1 = np.min(observed[:, 1])
obs_x2 = np.max(observed[:, 0])
obs_y2 = np.max(observed[:, 1])
exp_x1 = np.min(expected[:, 0])
exp_y1 = np.min(expected[:, 1])
exp_x2 = np.max(expected[:, 0])
exp_y2 = np.max(expected[:, 1])
return np.allclose(
[obs_x1, obs_y1, obs_x2, obs_y2],
[exp_x1, exp_y1, exp_x2, exp_y2],
atol=atol, rtol=rtol)
# the method is mostly tested indirectly, so very few tests here
class TestAugmenter_augment_bounding_boxes_by_keypoints(unittest.TestCase):
def test_x_min_max(self):
# ensure that min() and max() are applied to augmented x-coordinates
# when they are converted back to BBs
class _ShiftingXCoordAugmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images,
random_state, parents, hooks):
return self._augment_bounding_boxes_as_keypoints(
bounding_boxes_on_images, random_state, parents, hooks)
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
keypoints_on_images[0].keypoints[0].x += 10
keypoints_on_images[0].keypoints[1].x -= 10
return keypoints_on_images
def get_parameters(self):
return []
aug = _ShiftingXCoordAugmenter()
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)], shape=(10, 10, 3))
observed = aug(bounding_boxes=bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(2-10, 1), (0+10, 3)]
)
def test_y_min_max(self):
# ensure that min() and max() are applied to augmented y-coordinates
# when they are converted back to BBs
class _ShiftingYCoordAugmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images,
random_state, parents, hooks):
return self._augment_bounding_boxes_as_keypoints(
bounding_boxes_on_images, random_state, parents, hooks)
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
keypoints_on_images[0].keypoints[0].y += 10
keypoints_on_images[0].keypoints[1].y -= 10
return keypoints_on_images
def get_parameters(self):
return []
aug = _ShiftingYCoordAugmenter()
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)], shape=(10, 10, 3))
observed = aug(bounding_boxes=bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(0, 1-10), (2, 1+10)]
)
def test_x1_x2_can_get_flipped(self):
# ensure that augmented x-coordinates where x1>x2 are flipped
# before creating BBs from them
class _FlippingX1X2Augmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images,
random_state, parents, hooks):
return self._augment_bounding_boxes_as_keypoints(
bounding_boxes_on_images, random_state, parents, hooks)
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
keypoints_on_images[0].keypoints[0].x += 10 # top left
keypoints_on_images[0].keypoints[3].x += 10 # bottom left
return keypoints_on_images
def get_parameters(self):
return []
aug = _FlippingX1X2Augmenter()
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)], shape=(10, 10, 3))
observed = aug(bounding_boxes=bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(2, 1), (0+10, 3)]
)
def test_y1_y2_can_get_flipped(self):
# ensure that augmented y-coordinates where y1>y2 are flipped
# before creating BBs from them
class _FlippingY1Y2Augmenter(iaa.Augmenter):
def _augment_images(self, images, random_state, parents, hooks):
return images
def _augment_bounding_boxes(self, bounding_boxes_on_images,
random_state, parents, hooks):
return self._augment_bounding_boxes_as_keypoints(
bounding_boxes_on_images, random_state, parents, hooks)
def _augment_keypoints(self, keypoints_on_images, random_state,
parents, hooks):
keypoints_on_images[0].keypoints[0].y += 10 # top left
keypoints_on_images[0].keypoints[1].y += 10 # top right
return keypoints_on_images
def get_parameters(self):
return []
aug = _FlippingY1Y2Augmenter()
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)], shape=(10, 10, 3))
observed = aug(bounding_boxes=bbsoi)
assert np.allclose(
observed.bounding_boxes[0].coords,
[(0, 3), (2, 1+10)]
)
class TestAugmenter_augment(unittest.TestCase):
def setUp(self):
reseed()
def test_image(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
image_aug = aug.augment(image=image)
assert image_aug.shape == image.shape
assert np.array_equal(image_aug, image)
def test_images_list(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
images_aug = aug.augment(images=[image])
assert images_aug[0].shape == image.shape
assert np.array_equal(images_aug[0], image)
def test_images_and_heatmaps(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
images_aug, heatmaps_aug = aug.augment(images=[image],
heatmaps=[heatmaps])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
def test_images_and_segmentation_maps(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
images_aug, segmaps_aug = aug.augment(images=[image],
segmentation_maps=[segmaps])
assert np.array_equal(images_aug[0], image)
assert np.allclose(segmaps_aug[0].arr, segmaps.arr)
def test_images_and_keypoints(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
images_aug, keypoints_aug = aug.augment(images=[image],
keypoints=[keypoints])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(keypoints_aug[0], keypoints)
def test_images_and_polygons(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
images_aug, polygons_aug = aug.augment(images=[image],
polygons=[polygons])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(polygons_aug[0], polygons)
def test_images_and_line_strings(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
psoi = ia.quokka_polygons((128, 128), extract="square")
lsoi = ia.LineStringsOnImage([
psoi.polygons[0].to_line_string(closed=False)
], shape=psoi.shape)
images_aug, lsoi_aug = aug.augment(images=[image],
line_strings=[lsoi])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(lsoi_aug[0], lsoi)
def test_images_and_bounding_boxes(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
bbs = ia.quokka_bounding_boxes((128, 128), extract="square")
images_aug, bbs_aug = aug.augment(images=[image], bounding_boxes=[bbs])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(bbs_aug[0], bbs)
def test_image_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
batch = aug.augment(image=image, return_batch=True)
image_aug = batch.images_aug[0]
assert np.array_equal(image, image_aug)
def test_images_and_heatmaps_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
batch = aug.augment(images=[image], heatmaps=[heatmaps],
return_batch=True)
images_aug = batch.images_aug
heatmaps_aug = batch.heatmaps_aug
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
def test_images_and_segmentation_maps_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
batch = aug.augment(images=[image], segmentation_maps=[segmaps],
return_batch=True)
images_aug = batch.images_aug
segmaps_aug = batch.segmentation_maps_aug
assert np.array_equal(images_aug[0], image)
assert np.allclose(segmaps_aug[0].arr, segmaps.arr)
def test_images_and_keypoints_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
batch = aug.augment(images=[image], keypoints=[keypoints],
return_batch=True)
images_aug = batch.images_aug
keypoints_aug = batch.keypoints_aug
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(keypoints_aug[0], keypoints)
def test_images_and_polygons_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
batch = aug.augment(images=[image], polygons=[polygons],
return_batch=True)
images_aug = batch.images_aug
polygons_aug = batch.polygons_aug
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(polygons_aug[0], polygons)
def test_images_and_line_strings_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
psoi = ia.quokka_polygons((128, 128), extract="square")
lsoi = ia.LineStringsOnImage([
psoi.polygons[0].to_line_string(closed=False)
], shape=psoi.shape)
batch = aug.augment(images=[image], line_strings=[lsoi],
return_batch=True)
images_aug = batch.images_aug
lsoi_aug = batch.line_strings_aug
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(lsoi_aug[0], lsoi)
def test_images_and_bounding_boxes_return_batch(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
bbs = ia.quokka_bounding_boxes((128, 128), extract="square")
batch = aug.augment(images=[image], bounding_boxes=[bbs],
return_batch=True)
images_aug = batch.images_aug
bbs_aug = batch.bounding_boxes_aug
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(bbs_aug[0], bbs)
def test_non_image_data(self):
aug = iaa.Identity()
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
batch = aug.augment(segmentation_maps=[segmaps], keypoints=[keypoints],
polygons=[polygons], return_batch=True)
segmaps_aug = batch.segmentation_maps_aug
keypoints_aug = batch.keypoints_aug
polygons_aug = batch.polygons_aug
assert np.allclose(segmaps_aug[0].arr, segmaps.arr)
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(polygons_aug[0], polygons)
def test_non_image_data_unexpected_args_order(self):
aug = iaa.Identity()
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
batch = aug.augment(polygons=[polygons], segmentation_maps=[segmaps],
keypoints=[keypoints], return_batch=True)
segmaps_aug = batch.segmentation_maps_aug
keypoints_aug = batch.keypoints_aug
polygons_aug = batch.polygons_aug
assert np.allclose(segmaps_aug[0].arr, segmaps.arr)
assert np.allclose(keypoints_aug[0].to_xy_array(),
keypoints.to_xy_array())
for polygon_aug, polygon in zip(polygons_aug[0].polygons,
polygons.polygons):
assert polygon_aug.exterior_almost_equals(polygon)
def test_with_affine(self):
# make sure that augment actually does something
aug = iaa.Affine(translate_px={"x": 1}, order=0, mode="constant",
cval=0)
image = np.zeros((4, 4, 1), dtype=np.uint8) + 255
heatmaps = np.ones((1, 4, 4, 1), dtype=np.float32)
segmaps = np.ones((1, 4, 4, 1), dtype=np.int32)
kps = [(0, 0), (1, 2)]
bbs = [(0, 0, 1, 1), (1, 2, 2, 3)]
polygons = [(0, 0), (1, 0), (1, 1)]
ls = [(0, 0), (1, 0), (1, 1)]
image_aug = aug.augment(image=image)
_, heatmaps_aug = aug.augment(image=image, heatmaps=heatmaps)
_, segmaps_aug = aug.augment(image=image, segmentation_maps=segmaps)
_, kps_aug = aug.augment(image=image, keypoints=kps)
_, bbs_aug = aug.augment(image=image, bounding_boxes=bbs)
_, polygons_aug = aug.augment(image=image, polygons=polygons)
_, ls_aug = aug.augment(image=image, line_strings=ls)
# all augmentables must have been moved to the right by 1px
assert np.all(image_aug[:, 0] == 0)
assert np.all(image_aug[:, 1:] == 255)
assert np.allclose(heatmaps_aug[0][:, 0], 0.0)
assert np.allclose(heatmaps_aug[0][:, 1:], 1.0)
assert np.all(segmaps_aug[0][:, 0] == 0)
assert np.all(segmaps_aug[0][:, 1:] == 1)
assert np.allclose(kps_aug, [(1, 0), (2, 2)])
assert np.allclose(bbs_aug, [(1, 0, 2, 1), (2, 2, 3, 3)])
assert np.allclose(polygons_aug, [(1, 0), (2, 0), (2, 1)])
assert np.allclose(ls_aug, [(1, 0), (2, 0), (2, 1)])
def test_alignment(self):
# make sure that changes from augment() are aligned and vary between
# calls
aug = iaa.Affine(translate_px={"x": (0, 100)}, order=0, mode="constant",
cval=0)
image = np.zeros((1, 100, 1), dtype=np.uint8) + 255
heatmaps = np.ones((1, 1, 100, 1), dtype=np.float32)
segmaps = np.ones((1, 1, 100, 1), dtype=np.int32)
kps = [(0, 0)]
bbs = [(0, 0, 1, 1)]
polygons = [(0, 0), (1, 0), (1, 1)]
ls = [(0, 0), (1, 0), (1, 1)]
seen = []
for _ in sm.xrange(10):
batch_aug = aug.augment(image=image, heatmaps=heatmaps,
segmentation_maps=segmaps, keypoints=kps,
bounding_boxes=bbs, polygons=polygons,
line_strings=ls, return_batch=True)
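# the input image is all 255 and the affine fill value is 0, so the number
# of zero pixels in the single augmented row equals the translation in px;
# the analogous counts for heatmaps/segmaps and the x-coordinates of the
# coordinate-based augmentables must all report the same shift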
shift_image = np.sum(batch_aug.images_aug[0][0, :] == 0)
shift_heatmaps = np.sum(
np.isclose(batch_aug.heatmaps_aug[0][0, :, 0], 0.0))
shift_segmaps = np.sum(
batch_aug.segmentation_maps_aug[0][0, :, 0] == 0)
shift_kps = batch_aug.keypoints_aug[0][0]
shift_bbs = batch_aug.bounding_boxes_aug[0][0]
shift_polygons = batch_aug.polygons_aug[0][0]
shift_ls = batch_aug.line_strings_aug[0][0]
assert len({shift_image, shift_heatmaps, shift_segmaps,
shift_kps, shift_bbs, shift_polygons,
shift_ls}) == 1
seen.append(shift_image)
assert len(set(seen)) > 7
def test_alignment_and_same_outputs_in_deterministic_mode(self):
# make sure that changes from augment() are aligned
# and do NOT vary if the augmenter was already in deterministic mode
aug = iaa.Affine(translate_px={"x": (0, 100)}, order=0, mode="constant",
cval=0)
aug = aug.to_deterministic()
image = np.zeros((1, 100, 1), dtype=np.uint8) + 255
heatmaps = np.ones((1, 1, 100, 1), dtype=np.float32)
segmaps = np.ones((1, 1, 100, 1), dtype=np.int32)
kps = [(0, 0)]
bbs = [(0, 0, 1, 1)]
polygons = [(0, 0), (1, 0), (1, 1)]
ls = [(0, 0), (1, 0), (1, 1)]
seen = []
for _ in sm.xrange(10):
batch_aug = aug.augment(image=image, heatmaps=heatmaps,
segmentation_maps=segmaps, keypoints=kps,
bounding_boxes=bbs, polygons=polygons,
line_strings=ls,
return_batch=True)
shift_image = np.sum(batch_aug.images_aug[0][0, :] == 0)
shift_heatmaps = np.sum(
np.isclose(batch_aug.heatmaps_aug[0][0, :, 0], 0.0))
shift_segmaps = np.sum(
batch_aug.segmentation_maps_aug[0][0, :, 0] == 0)
shift_kps = batch_aug.keypoints_aug[0][0]
shift_bbs = batch_aug.bounding_boxes_aug[0][0]
shift_polygons = batch_aug.polygons_aug[0][0]
shift_ls = batch_aug.line_strings_aug[0][0]
assert len({shift_image, shift_heatmaps, shift_segmaps,
shift_kps, shift_bbs, shift_polygons,
shift_ls}) == 1
seen.append(shift_image)
assert len(set(seen)) == 1
def test_arrays_become_lists_if_augmenter_changes_shapes(self):
# make sure that arrays (of images, heatmaps, segmaps) get split to
# lists of arrays if the augmenter changes shapes in non-uniform
# (between images) ways
# we augment 100 images here with rotation of either 0deg or 90deg
# and do not resize back to the original image size afterwards, so
# shapes change
aug = iaa.Rot90([0, 1], keep_size=False)
# base_arr is (100, 1, 2) array, each containing [[0, 1]]
base_arr = np.tile(np.arange(1*2).reshape((1, 2))[np.newaxis, :, :],
(100, 1, 1))
images = np.copy(base_arr)[:, :, :, np.newaxis].astype(np.uint8)
heatmaps = (
np.copy(base_arr)[:, :, :, np.newaxis].astype(np.float32)
/ np.max(base_arr)
)
segmaps = np.copy(base_arr)[:, :, :, np.newaxis].astype(np.int32)
batch_aug = aug.augment(images=images, heatmaps=heatmaps,
segmentation_maps=segmaps,
return_batch=True)
assert isinstance(batch_aug.images_aug, list)
assert isinstance(batch_aug.heatmaps_aug, list)
assert isinstance(batch_aug.segmentation_maps_aug, list)
shapes_images = [arr.shape for arr in batch_aug.images_aug]
shapes_heatmaps = [arr.shape for arr in batch_aug.heatmaps_aug]
shapes_segmaps = [arr.shape for arr in batch_aug.segmentation_maps_aug]
assert (
[shape[0:2] for shape in shapes_images]
== [shape[0:2] for shape in shapes_heatmaps]
== [shape[0:2] for shape in shapes_segmaps]
)
assert len(set(shapes_images)) == 2
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_none_of_them_images(self):
aug = iaa.Identity()
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
keypoints_aug, polygons_aug = aug.augment(keypoints=[keypoints],
polygons=[polygons])
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(polygons_aug[0], polygons)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_none_of_them_images_inverted(self):
aug = iaa.Identity()
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
polygons_aug, keypoints_aug = aug.augment(polygons=[polygons],
keypoints=[keypoints])
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(polygons_aug[0], polygons)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_heatmaps(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
heatmaps_aug, images_aug = aug.augment(heatmaps=[heatmaps],
images=[image])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_segmaps(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
segmaps_aug, images_aug = aug.augment(segmentation_maps=[segmaps],
images=[image])
assert np.array_equal(images_aug[0], image)
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_keypoints(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
keypoints_aug, images_aug = aug.augment(keypoints=[keypoints],
images=[image])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(keypoints_aug[0], keypoints)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_bbs(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
bbs = ia.quokka_bounding_boxes((128, 128), extract="square")
bbs_aug, images_aug = aug.augment(bounding_boxes=[bbs],
images=[image])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(bbs_aug[0], bbs)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_polygons(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
polygons_aug, images_aug = aug.augment(polygons=[polygons],
images=[image])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(polygons_aug[0], polygons)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_two_outputs_inverted_order_line_strings(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
psoi = ia.quokka_polygons((128, 128), extract="square")
lsoi = ia.LineStringsOnImage([
psoi.polygons[0].to_line_string(closed=False)
], shape=psoi.shape)
lsoi_aug, images_aug = aug.augment(line_strings=[lsoi],
images=[image])
assert np.array_equal(images_aug[0], image)
assert_cbaois_equal(lsoi_aug[0], lsoi)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_three_inputs_expected_order(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
images_aug, heatmaps_aug, segmaps_aug = aug.augment(
images=[image],
heatmaps=[heatmaps],
segmentation_maps=[segmaps])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_three_inputs_expected_order2(self):
aug = iaa.Identity()
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
segmaps_aug, keypoints_aug, polygons_aug = aug.augment(
segmentation_maps=[segmaps],
keypoints=[keypoints],
polygons=[polygons])
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(polygons_aug[0], polygons)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_three_inputs_inverted_order(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
segmaps_aug, heatmaps_aug, images_aug = aug.augment(
segmentation_maps=[segmaps],
heatmaps=[heatmaps],
images=[image])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_three_inputs_inverted_order2(self):
aug = iaa.Identity()
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
polygons_aug, keypoints_aug, segmaps_aug = aug.augment(
polygons=[polygons],
keypoints=[keypoints],
segmentation_maps=[segmaps])
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(polygons_aug[0], polygons)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_all_inputs_expected_order(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
bbs = ia.quokka_bounding_boxes((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
lsoi = ia.LineStringsOnImage([
polygons.polygons[0].to_line_string(closed=False)
], shape=polygons.shape)
images_aug, heatmaps_aug, segmaps_aug, keypoints_aug, bbs_aug, \
polygons_aug, lsoi_aug = aug.augment(
images=[image],
heatmaps=[heatmaps],
segmentation_maps=[segmaps],
keypoints=[keypoints],
bounding_boxes=[bbs],
polygons=[polygons],
line_strings=[lsoi])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(bbs_aug[0], bbs)
assert_cbaois_equal(polygons_aug[0], polygons)
assert_cbaois_equal(lsoi_aug[0], lsoi)
@unittest.skipIf(not IS_PY36_OR_HIGHER,
"Behaviour is only supported in python 3.6+")
def test_py_gte_36_all_inputs_inverted_order(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
keypoints = ia.quokka_keypoints((128, 128), extract="square")
bbs = ia.quokka_bounding_boxes((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
lsoi = ia.LineStringsOnImage([
polygons.polygons[0].to_line_string(closed=False)
], shape=polygons.shape)
lsoi_aug, polygons_aug, bbs_aug, keypoints_aug, segmaps_aug, \
heatmaps_aug, images_aug = aug.augment(
line_strings=[lsoi],
polygons=[polygons],
bounding_boxes=[bbs],
keypoints=[keypoints],
segmentation_maps=[segmaps],
heatmaps=[heatmaps],
images=[image])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
assert np.array_equal(segmaps_aug[0].arr, segmaps.arr)
assert_cbaois_equal(keypoints_aug[0], keypoints)
assert_cbaois_equal(bbs_aug[0], bbs)
assert_cbaois_equal(polygons_aug[0], polygons)
assert_cbaois_equal(lsoi_aug[0], lsoi)
@unittest.skipIf(IS_PY36_OR_HIGHER,
"Test checks behaviour for python <=3.5")
def test_py_lte_35_calls_without_images_fail(self):
aug = iaa.Identity()
keypoints = ia.quokka_keypoints((128, 128), extract="square")
polygons = ia.quokka_polygons((128, 128), extract="square")
got_exception = False
try:
_ = aug.augment(keypoints=[keypoints], polygons=[polygons])
except Exception as exc:
msg = "Requested two outputs from augment() that were not 'images'"
assert msg in str(exc)
got_exception = True
assert got_exception
@unittest.skipIf(IS_PY36_OR_HIGHER,
"Test checks behaviour for python <=3.5")
def test_py_lte_35_calls_with_more_than_three_args_fail(self):
aug = iaa.Identity()
image = ia.quokka((128, 128), extract="square")
heatmaps = ia.quokka_heatmap((128, 128), extract="square")
segmaps = ia.quokka_segmentation_map((128, 128), extract="square")
got_exception = False
try:
_ = aug.augment(images=[image], heatmaps=[heatmaps],
segmentation_maps=[segmaps])
except Exception as exc:
assert "Requested more than two outputs" in str(exc)
got_exception = True
assert got_exception
class TestAugmenter___call__(unittest.TestCase):
def setUp(self):
reseed()
def test_with_two_augmentables(self):
image = ia.quokka(size=(128, 128), extract="square")
heatmaps = ia.quokka_heatmap(size=(128, 128), extract="square")
images_aug, heatmaps_aug = iaa.Identity()(images=[image],
heatmaps=[heatmaps])
assert np.array_equal(images_aug[0], image)
assert np.allclose(heatmaps_aug[0].arr_0to1, heatmaps.arr_0to1)
class TestAugmenter_pool(unittest.TestCase):
def setUp(self):
reseed()
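# imgaug.multicore.Pool is replaced by a MagicMock here, so no worker
# processes are started; the test only verifies that Augmenter.pool()
# forwards the augmenter and its keyword arguments to the Pool constructor
# and uses the result as a context manager.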
def test_pool(self):
augseq = iaa.Identity()
mock_Pool = mock.MagicMock()
mock_Pool.return_value = mock_Pool
mock_Pool.__enter__.return_value = None
mock_Pool.__exit__.return_value = None
with mock.patch("imgaug.multicore.Pool", mock_Pool):
with augseq.pool(processes=2, maxtasksperchild=10, seed=17):
pass
assert mock_Pool.call_count == 1
assert mock_Pool.__enter__.call_count == 1
assert mock_Pool.__exit__.call_count == 1
assert mock_Pool.call_args[0][0] == augseq
assert mock_Pool.call_args[1]["processes"] == 2
assert mock_Pool.call_args[1]["maxtasksperchild"] == 10
assert mock_Pool.call_args[1]["seed"] == 17
class TestAugmenter_find_augmenters_by_name(unittest.TestCase):
def setUp(self):
reseed()
@property
def seq(self):
noop1 = iaa.Identity(name="Identity")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Identity(name="Identity2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
return seq1, seq2
def test_find_top_element(self):
seq1, seq2 = self.seq
augs = seq1.find_augmenters_by_name("Seq")
assert len(augs) == 1
assert augs[0] == seq1
def test_find_nested_element(self):
seq1, seq2 = self.seq
augs = seq1.find_augmenters_by_name("Seq2")
assert len(augs) == 1
assert augs[0] == seq2
def test_find_list_of_names(self):
seq1, seq2 = self.seq
augs = seq1.find_augmenters_by_names(["Seq", "Seq2"])
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
def test_find_by_regex(self):
seq1, seq2 = self.seq
augs = seq1.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
class TestAugmenter_find_augmenters(unittest.TestCase):
def setUp(self):
reseed()
@property
def seq(self):
noop1 = iaa.Identity(name="Identity")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Identity(name="Identity2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
return seq1, seq2
def test_find_by_list_of_names(self):
def _func(aug, parents):
return aug.name in ["Seq", "Seq2"]
seq1, seq2 = self.seq
augs = seq1.find_augmenters(_func)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == seq2
def test_use_parents_arg(self):
def _func(aug, parents):
return (
aug.name in ["Seq", "Seq2"]
and len(parents) > 0
)
seq1, seq2 = self.seq
augs = seq1.find_augmenters(_func)
assert len(augs) == 1
assert augs[0] == seq2
def test_find_by_list_of_names_flat_false(self):
def _func(aug, parents):
return aug.name in ["Seq", "Seq2"]
seq1, seq2 = self.seq
augs = seq1.find_augmenters(_func, flat=False)
assert len(augs) == 2
assert augs[0] == seq1
assert augs[1] == [seq2]
class TestAugmenter_remove(unittest.TestCase):
def setUp(self):
reseed()
@property
def seq(self):
noop1 = iaa.Identity(name="Identity")
fliplr = iaa.Fliplr(name="Fliplr")
flipud = iaa.Flipud(name="Flipud")
noop2 = iaa.Identity(name="Identity2")
seq2 = iaa.Sequential([flipud, noop2], name="Seq2")
seq1 = iaa.Sequential([noop1, fliplr, seq2], name="Seq")
return seq1
def test_remove_by_name(self):
def _func(aug, parents):
return aug.name == "Seq2"
augs = self.seq
augs = augs.remove_augmenters(_func)
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 1
assert seqs[0].name == "Seq"
def test_remove_by_name_and_parents_arg(self):
def _func(aug, parents):
return aug.name == "Seq2" and len(parents) == 0
augs = self.seq
augs = augs.remove_augmenters(_func)
seqs = augs.find_augmenters_by_name(r"Seq.*", regex=True)
assert len(seqs) == 2
assert seqs[0].name == "Seq"
assert seqs[1].name == "Seq2"
def test_remove_all_without_inplace_removal(self):
def _func(aug, parents):
return True
augs = self.seq
augs = augs.remove_augmenters(_func)
assert augs is not None
assert isinstance(augs, iaa.Identity)
def test_remove_all_with_inplace_removal(self):
def _func(aug, parents):
return aug.name == "Seq"
augs = self.seq
got_exception = False
try:
_ = augs.remove_augmenters(_func, copy=False)
except Exception as exc:
got_exception = True
expected = (
"Inplace removal of topmost augmenter requested, "
"which is currently not possible")
assert expected in str(exc)
assert got_exception
def test_remove_all_without_inplace_removal_and_no_identity(self):
def _func(aug, parents):
return True
augs = self.seq
augs = augs.remove_augmenters(_func, identity_if_topmost=False)
assert augs is None
def test_remove_all_without_inplace_removal_and_no_noop(self):
def _func(aug, parents):
return True
augs = self.seq
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
augs = augs.remove_augmenters(_func, noop_if_topmost=False)
assert len(caught_warnings) == 1
assert "deprecated" in str(caught_warnings[-1].message)
assert augs is None
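# copy_random_state copies the (localized) RNG states from a source augmenter
# tree to a target tree, matching augmenters either by their position in the
# tree or by their name; afterwards both trees should produce identical
# augmentations, which is what the tests below check.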
class TestAugmenter_copy_random_state(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
return ia.quokka_square(size=(128, 128))
@property
def images(self):
return np.array([self.image] * 64, dtype=np.uint8)
@property
def source(self):
source = iaa.Sequential([
iaa.Fliplr(0.5, name="hflip"),
iaa.Dropout(0.05, name="dropout"),
iaa.Affine(translate_px=(-10, 10), name="translate",
seed=3),
iaa.GaussianBlur(1.0, name="blur", seed=4)
], seed=5)
return source
@property
def target(self):
target = iaa.Sequential([
iaa.Fliplr(0.5, name="hflip"),
iaa.Dropout(0.05, name="dropout"),
iaa.Affine(translate_px=(-10, 10), name="translate")
])
return target
def test_matching_position(self):
def _func(aug, parents):
return aug.name == "blur"
images = self.images
source = self.source
target = self.target
source.localize_random_state_()
target_cprs = target.copy_random_state(source, matching="position")
source_alt = source.remove_augmenters(_func)
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
assert target_cprs.random_state.equals(source_alt.random_state)
for i in sm.xrange(3):
assert target_cprs[i].random_state.equals(
source_alt[i].random_state)
assert np.array_equal(images_aug_source, images_aug_target)
def test_matching_position_copy_determinism(self):
def _func(aug, parents):
return aug.name == "blur"
images = self.images
source = self.source
target = self.target
source.localize_random_state_()
source[0].deterministic = True
target_cprs = target.copy_random_state(
source, matching="position", copy_determinism=True)
source_alt = source.remove_augmenters(_func)
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
assert target_cprs[0].deterministic is True
assert np.array_equal(images_aug_source, images_aug_target)
def test_matching_name(self):
def _func(aug, parents):
return aug.name == "blur"
images = self.images
source = self.source
target = self.target
source.localize_random_state_()
target_cprs = target.copy_random_state(source, matching="name")
source_alt = source.remove_augmenters(_func)
images_aug_source = source_alt.augment_images(images)
images_aug_target = target_cprs.augment_images(images)
assert np.array_equal(images_aug_source, images_aug_target)
def test_matching_name_copy_determinism(self):
def _func(aug, parents):
return aug.name == "blur"
images = self.images
source = self.source
target = self.target
source.localize_random_state_()
source_alt = source.remove_augmenters(_func)
source_det = source_alt.to_deterministic()
target_cprs_det = target.copy_random_state(
source_det, matching="name", copy_determinism=True)
images_aug_source1 = source_det.augment_images(images)
images_aug_target1 = target_cprs_det.augment_images(images)
images_aug_source2 = source_det.augment_images(images)
images_aug_target2 = target_cprs_det.augment_images(images)
assert np.array_equal(images_aug_source1, images_aug_source2)
assert np.array_equal(images_aug_target1, images_aug_target2)
assert np.array_equal(images_aug_source1, images_aug_target1)
assert np.array_equal(images_aug_source2, images_aug_target2)
def test_copy_fails_when_source_rngs_are_not_localized__name(self):
source = iaa.Fliplr(0.5, name="hflip")
target = iaa.Fliplr(0.5, name="hflip")
got_exception = False
try:
_ = target.copy_random_state(source, matching="name")
except Exception as exc:
got_exception = True
assert "localize_random_state" in str(exc)
assert got_exception
def test_copy_fails_when_source_rngs_are_not_localized__position(self):
source = iaa.Fliplr(0.5, name="hflip")
target = iaa.Fliplr(0.5, name="hflip")
got_exception = False
try:
_ = target.copy_random_state(source, matching="position")
except Exception as exc:
got_exception = True
assert "localize_random_state" in str(exc)
assert got_exception
def test_copy_fails_when_names_not_match_and_matching_not_tolerant(self):
source = iaa.Fliplr(0.5, name="hflip-other-name")
target = iaa.Fliplr(0.5, name="hflip")
source.localize_random_state_()
got_exception = False
try:
_ = target.copy_random_state(
source, matching="name", matching_tolerant=False)
except Exception as exc:
got_exception = True
assert "not found among source augmenters" in str(exc)
assert got_exception
def test_copy_fails_for_not_tolerant_position_matching(self):
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"),
iaa.Fliplr(0.5, name="hflip2")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
got_exception = False
try:
_ = target.copy_random_state(
source, matching="position", matching_tolerant=False)
except Exception as exc:
got_exception = True
assert "different lengths" in str(exc)
assert got_exception
def test_copy_fails_for_unknown_matching_method(self):
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"),
iaa.Fliplr(0.5, name="hflip2")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
got_exception = False
try:
_ = target.copy_random_state(source, matching="test")
except Exception as exc:
got_exception = True
assert "Unknown matching method" in str(exc)
assert got_exception
def test_warn_if_multiple_augmenters_with_same_name(self):
source = iaa.Sequential([iaa.Fliplr(0.5, name="hflip"),
iaa.Fliplr(0.5, name="hflip")])
target = iaa.Sequential([iaa.Fliplr(0.5, name="hflip")])
source.localize_random_state_()
with warnings.catch_warnings(record=True) as caught_warnings:
warnings.simplefilter("always")
_ = target.copy_random_state(source, matching="name")
assert len(caught_warnings) == 1
assert (
"contains multiple augmenters with the same name"
in str(caught_warnings[-1].message)
)
# TODO these tests change the input type from list to array. It might be
# reasonable to change that and to test the array scenario separately.
class TestAugmenterHooks(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
image = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8)
return np.atleast_3d(image)
@property
def image_lr(self):
image_lr = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8)
return np.atleast_3d(image_lr)
@property
def image_lrud(self):
image_lrud = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8)
return np.atleast_3d(image_lrud)
def test_preprocessor(self):
def preprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(preprocessor=preprocessor)
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
images_aug = seq.augment_images([self.image], hooks=hooks)
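# the hook's +1 is applied once per augmenter call in the pipeline (the
# Sequential plus each of its two children), which is why the centre pixel
# is expected to end up at 0 + 3 = 3 rather than at 1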
expected = np.copy(self.image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
def test_postprocessor(self):
def postprocessor(images, augmenter, parents):
img = np.copy(images)
img[0][1, 1, 0] += 1
return img
hooks = ia.HooksImages(postprocessor=postprocessor)
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
images_aug = seq.augment_images([self.image], hooks=hooks)
expected = np.copy(self.image_lrud)
expected[1, 1, 0] = 3
assert np.array_equal(images_aug[0], expected)
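# the propagator hook decides whether an augmenter may propagate to its
# children; returning False for the Sequential therefore suppresses both
# flips and the image must come back unchanged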
def test_propagator(self):
def propagator(images, augmenter, parents, default):
if "Seq" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(propagator=propagator)
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
images_aug = seq.augment_images([self.image], hooks=hooks)
assert np.array_equal(images_aug[0], self.image)
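# the activator hook switches individual augmenters on or off; here only
# Flipud is deactivated, so the expected output is the horizontally
# flipped image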
def test_activator(self):
def activator(images, augmenter, parents, default):
if "Flipud" in augmenter.name:
return False
else:
return default
hooks = ia.HooksImages(activator=activator)
seq = iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)])
images_aug = seq.augment_images([self.image], hooks=hooks)
assert np.array_equal(images_aug[0], self.image_lr)
def test_activator_keypoints(self):
def activator(keypoints_on_images, augmenter, parents, default):
return False
hooks = ia.HooksKeypoints(activator=activator)
kps = [ia.Keypoint(x=1, y=0), ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 10, 3))
aug = iaa.Affine(translate_px=1)
keypoints_aug = aug.augment_keypoints(kpsoi, hooks=hooks)
assert keypoints_equal([keypoints_aug], [kpsoi])
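# The following tests feed images loaded via cv2, imageio and PIL into dummy
# augmenters that are written to modify their inputs in place. They assert
# that the loaded arrays stay untouched, i.e. that augment() appears to copy
# such inputs (e.g. negative-stride or read-only arrays) instead of mutating
# them.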
class TestAugmenterWithLoadedImages(unittest.TestCase):
def setUp(self):
reseed()
def test_with_cv2(self):
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
images_cp = np.copy(images)
aug_arrs = _InplaceDummyAugmenterImgsArray(1)
aug_lists = _InplaceDummyAugmenterImgsList(1)
with TemporaryDirectory() as dirpath:
imgpath = os.path.join(dirpath, "temp_cv2.png")
imageio.imwrite(imgpath, image)
image_reloaded = cv2.imread(imgpath)[:, :, ::-1]
images_reloaded = image_reloaded[np.newaxis, :, :, :]
image_aug = aug_lists(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
image_aug = aug_lists.augment_image(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
images_aug = aug_arrs(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
images_aug = aug_arrs.augment_images(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
def test_with_imageio(self):
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
images_cp = np.copy(images)
aug_arrs = _InplaceDummyAugmenterImgsArray(1)
aug_lists = _InplaceDummyAugmenterImgsList(1)
with TemporaryDirectory() as dirpath:
imgpath = os.path.join(dirpath, "temp_imageio.png")
imageio.imwrite(imgpath, image)
image_reloaded = imageio.imread(imgpath)
images_reloaded = image_reloaded[np.newaxis, :, :, :]
image_aug = aug_lists(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
image_aug = aug_lists.augment_image(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
images_aug = aug_arrs(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
images_aug = aug_arrs.augment_images(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
def test_with_pil(self):
fnames = ["asarray", "array"]
for fname in fnames:
with self.subTest(fname=fname):
image = np.arange(10*20).astype(np.uint8).reshape((10, 20, 1))
image = np.tile(image, (1, 1, 3))
image[:, :, 0] += 0
image[:, :, 1] += 1
image[:, :, 2] += 2
images = image[np.newaxis, :, :, :]
image_cp = np.copy(image)
images_cp = np.copy(images)
aug_arrs = _InplaceDummyAugmenterImgsArray(1)
aug_lists = _InplaceDummyAugmenterImgsList(1)
with TemporaryDirectory() as dirpath:
imgpath = os.path.join(dirpath,
"temp_pil_%s.png" % (fname,))
imageio.imwrite(imgpath, image)
image_reloaded = getattr(np, fname)(PIL.Image.open(imgpath))
images_reloaded = image_reloaded[np.newaxis, :, :, :]
image_aug = aug_lists(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
image_aug = aug_lists.augment_image(image=image_reloaded)
assert image_aug is not image_reloaded
assert np.array_equal(image_reloaded, image_cp)
assert np.array_equal(image_aug, image_cp + 1)
images_aug = aug_arrs(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
images_aug = aug_arrs.augment_images(images=images_reloaded)
assert images_aug is not images_reloaded
assert np.array_equal(images_reloaded, images_cp)
assert np.array_equal(images_aug, images_cp + 1)
class TestSequential(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image)
@property
def images(self):
return np.array([self.image], dtype=np.uint8)
@property
def image_lr(self):
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
return np.atleast_3d(image_lr)
@property
def images_lr(self):
return np.array([self.image_lr], dtype=np.uint8)
@property
def image_ud(self):
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image_ud)
@property
def images_ud(self):
return np.array([self.image_ud], dtype=np.uint8)
@property
def image_lr_ud(self):
image_lr_ud = np.array([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]], dtype=np.uint8) * 255
return np.atleast_3d(image_lr_ud)
@property
def images_lr_ud(self):
return np.array([self.image_lr_ud])
@property
def keypoints(self):
kps = [ia.Keypoint(x=1, y=0),
ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def keypoints_aug(self):
kps = [ia.Keypoint(x=3-1, y=3-0),
ia.Keypoint(x=3-2, y=3-0),
ia.Keypoint(x=3-2, y=3-1)]
return ia.KeypointsOnImage(kps, shape=self.image.shape)
@property
def polygons(self):
polygon = ia.Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
return ia.PolygonsOnImage([polygon], shape=self.image.shape)
@property
def polygons_aug(self):
polygon = ia.Polygon([(3-0, 3-0), (3-2, 3-0), (3-2, 3-2), (3-0, 3-2)])
return ia.PolygonsOnImage([polygon], shape=self.image.shape)
@property
def lsoi(self):
ls = ia.LineString([(0, 0), (2, 0), (2, 2), (0, 2)])
return ia.LineStringsOnImage([ls], shape=self.image.shape)
@property
def lsoi_aug(self):
ls = ia.LineString([(3-0, 3-0), (3-2, 3-0), (3-2, 3-2), (3-0, 3-2)])
return ia.LineStringsOnImage([ls], shape=self.image.shape)
@property
def bbsoi(self):
bb = ia.BoundingBox(x1=0, y1=0, x2=2, y2=2)
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def bbsoi_aug(self):
x1 = 3-0
x2 = 3-2
y1 = 3-0
y2 = 3-2
bb = ia.BoundingBox(x1=min(x1, x2), y1=min(y1, y2),
x2=max(x1, x2), y2=max(y1, y2))
return ia.BoundingBoxesOnImage([bb], shape=self.image.shape)
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0, 0, 1.0],
[0, 0, 1.0],
[0, 1.0, 1.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=self.image.shape)
@property
def heatmaps_aug(self):
heatmaps_arr_expected = np.float32([[1.0, 1.0, 0.0],
[1.0, 0, 0],
[1.0, 0, 0]])
return ia.HeatmapsOnImage(heatmaps_arr_expected, shape=self.image.shape)
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=self.image.shape)
@property
def segmaps_aug(self):
segmaps_arr_expected = np.int32([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]])
return ia.SegmentationMapsOnImage(segmaps_arr_expected,
shape=self.image.shape)
@property
def seq_two_flips(self):
return iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
])
def test_images__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_lr_ud)
def test_images__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_lr_ud)
def test_images_as_list__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_images([self.image])
assert array_equal_lists(observed, [self.image_lr_ud])
def test_images_as_list__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.image])
assert array_equal_lists(observed, [self.image_lr_ud])
def test_keypoints__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed, [self.keypoints_aug])
def test_keypoints__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed, [self.keypoints_aug])
def test_polygons__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_aug)
def test_polygons__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_aug)
def test_line_strings__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_aug)
def test_line_strings__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_aug)
def test_bounding_boxes__two_flips(self):
aug = self.seq_two_flips
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_aug)
def test_bounding_boxes__two_flips__deterministic(self):
aug = self.seq_two_flips
aug_det = aug.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_aug)
def test_heatmaps__two_flips(self):
aug = self.seq_two_flips
heatmaps = self.heatmaps
observed = aug.augment_heatmaps([heatmaps])[0]
assert observed.shape == (3, 3, 1)
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1.0 - 1e-6 < observed.max_value < 1.0 + 1e-6
assert np.allclose(observed.get_arr(),
self.heatmaps_aug.get_arr())
def test_segmentation_maps__two_flips(self):
aug = self.seq_two_flips
segmaps = self.segmaps
observed = aug.augment_segmentation_maps([segmaps])[0]
assert observed.shape == (3, 3, 1)
assert np.array_equal(observed.get_arr(),
self.segmaps_aug.get_arr())
def test_children_not_provided(self):
aug = iaa.Sequential()
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
def test_children_are_none(self):
aug = iaa.Sequential(children=None)
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
def test_children_is_single_augmenter_without_list(self):
aug = iaa.Sequential(iaa.Fliplr(1.0))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
def test_children_is_a_sequential(self):
aug = iaa.Sequential(iaa.Sequential(iaa.Fliplr(1.0)))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
def test_children_is_list_of_sequentials(self):
aug = iaa.Sequential([
iaa.Sequential(iaa.Flipud(1.0)),
iaa.Sequential(iaa.Fliplr(1.0))
])
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(np.flipud(image)))
def test_randomness__two_flips(self):
# 50% horizontal flip, 50% vertical flip
aug = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5)
])
frac_same = self._test_randomness__two_flips__compute_fraction_same(
aug, 200)
assert np.isclose(frac_same, 0.25, rtol=0, atol=0.1)
def test_randomness__two_flips__deterministic(self):
# 50% horizontal flip, 50% vertical flip
aug = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.Flipud(0.5)
])
aug_det = aug.to_deterministic()
frac_same = self._test_randomness__two_flips__compute_fraction_same(
aug_det, 200)
assert (
np.isclose(frac_same, 0.0, rtol=0, atol=1e-5)
or np.isclose(frac_same, 1.0, rtol=0, atol=1e-5)
)
def _test_randomness__two_flips__compute_fraction_same(self, aug,
nb_iterations):
expected = [self.images, self.images_lr, self.images_ud,
self.images_lr_ud]
last_aug = None
nb_changed_aug = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
assert np.any([np.array_equal(observed_aug, expected_i)
for expected_i in expected])
# should be the same in roughly 25% of all cases: the two 50% flips give
# four equally likely outputs, so P(two consecutive outputs match) = 4*(1/4)^2 = 1/4
frac_changed = nb_changed_aug / nb_iterations
return 1 - frac_changed
def test_random_order_true_images(self):
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}, mode="constant", cval=0, order=0),
iaa.Fliplr(1.0)
], random_order=True)
frac_12 = self._test_random_order_images_frac_12(aug, 200)
assert np.isclose(frac_12, 0.5, rtol=0, atol=0.075)
def test_random_order_false_images(self):
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}, mode="constant", cval=0, order=0),
iaa.Fliplr(1.0)
], random_order=False)
frac_12 = self._test_random_order_images_frac_12(aug, 25)
assert frac_12 >= 1.0 - 1e-4
def test_random_order_true_deterministic_images(self):
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}, mode="constant", cval=0, order=0),
iaa.Fliplr(1.0)
], random_order=True)
aug = aug.to_deterministic()
frac_12 = self._test_random_order_images_frac_12(aug, 25)
assert (frac_12 >= 1.0-1e-4 or frac_12 <= 0.0+1e-4)
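# helper: image_12 is what results from applying the Affine x-translation
# first and Fliplr second, image_21 from the reverse order; the returned
# value is the fraction of iterations in which the first ordering was used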
@classmethod
def _test_random_order_images_frac_12(cls, aug, nb_iterations):
image = np.uint8([[0, 1],
[2, 3]])
image_12 = np.uint8([[0, 0],
[2, 0]])
image_21 = np.uint8([[0, 1],
[0, 3]])
# count how often each ordering is observed; boolean flags would only
# record *whether* an ordering occurred, not how often, which is what
# the returned fraction is meant to measure
seen = [0, 0]
for _ in sm.xrange(nb_iterations):
observed = aug.augment_images([image])[0]
if np.array_equal(observed, image_12):
seen[0] += 1
elif np.array_equal(observed, image_21):
seen[1] += 1
else:
assert False
frac_12 = seen[0] / np.sum(seen)
return frac_12
# TODO add random_order=False
def test_random_order_heatmaps(self):
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}),
iaa.Fliplr(1.0)
], random_order=True)
heatmaps_arr = np.float32([[0, 0, 1.0],
[0, 0, 1.0],
[0, 1.0, 1.0]])
heatmaps_arr_expected1 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr_expected2 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
seen = [False, False]
for _ in sm.xrange(100):
observed = aug.augment_heatmaps([
ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))])[0]
if np.allclose(observed.get_arr(), heatmaps_arr_expected1):
seen[0] = True
elif np.allclose(observed.get_arr(), heatmaps_arr_expected2):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
# TODO add random_order=False
def test_random_order_segmentation_maps(self):
aug = iaa.Sequential([
iaa.Affine(translate_px={"x": 1}),
iaa.Fliplr(1.0)
], random_order=True)
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
segmaps_arr_expected1 = np.int32([[0, 0, 0],
[0, 0, 0],
[1, 0, 0]])
segmaps_arr_expected2 = np.int32([[0, 1, 0],
[0, 1, 0],
[0, 1, 1]])
seen = [False, False]
for _ in sm.xrange(100):
observed = aug.augment_segmentation_maps([
SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))])[0]
if np.array_equal(observed.get_arr(), segmaps_arr_expected1):
seen[0] = True
elif np.array_equal(observed.get_arr(), segmaps_arr_expected2):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
# TODO add random_order=False
def test_random_order_keypoints(self):
KP = ia.Keypoint
kps = [KP(0, 0), KP(2, 0), KP(2, 2)]
kps_12 = [KP((0+1)*2, 0), KP((2+1)*2, 0), KP((2+1)*2, 2)]
kps_21 = [KP((0*2)+1, 0), KP((2*2)+1, 0), KP((2*2)+1, 2)]
kpsoi = ia.KeypointsOnImage(kps, shape=(3, 3))
kpsoi_12 = ia.KeypointsOnImage(kps_12, shape=(3, 3))
kpsoi_21 = ia.KeypointsOnImage(kps_21, shape=(3, 3))
def func1(keypoints_on_images, random_state, parents, hooks):
for kpsoi in keypoints_on_images:
for kp in kpsoi.keypoints:
kp.x += 1
return keypoints_on_images
def func2(keypoints_on_images, random_state, parents, hooks):
for kpsoi in keypoints_on_images:
for kp in kpsoi.keypoints:
kp.x *= 2
return keypoints_on_images
aug_1 = iaa.Lambda(func_keypoints=func1)
aug_2 = iaa.Lambda(func_keypoints=func2)
seq = iaa.Sequential([aug_1, aug_2], random_order=True)
seen = [False, False]
for _ in sm.xrange(100):
observed = seq.augment_keypoints(kpsoi)
if np.allclose(observed.to_xy_array(), kpsoi_12.to_xy_array()):
seen[0] = True
elif np.allclose(observed.to_xy_array(), kpsoi_21.to_xy_array()):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
# TODO add random_order=False
def test_random_order_polygons(self):
cba = ia.Polygon([(0, 0), (1, 0), (1, 1)])
cba_12 = ia.Polygon([(0, 0), (1, 0), ((1+1)*2, 1)])
cba_21 = ia.Polygon([(0, 0), (1, 0), ((1*2)+1, 1)])
cbaoi = ia.PolygonsOnImage([cba], shape=(3, 3))
def func1(polygons_on_images, random_state, parents, hooks):
for cbaoi_ in polygons_on_images:
for cba_ in cbaoi_.items:
cba_.exterior[-1, 0] += 1
return polygons_on_images
def func2(polygons_on_images, random_state, parents, hooks):
for cbaoi_ in polygons_on_images:
for cba_ in cbaoi_.items:
cba_.exterior[-1, 0] *= 2
return polygons_on_images
aug_1 = iaa.Lambda(func_polygons=func1)
aug_2 = iaa.Lambda(func_polygons=func2)
seq = iaa.Sequential([aug_1, aug_2], random_order=True)
seen = [False, False]
for _ in sm.xrange(100):
observed = seq.augment_polygons(cbaoi)
if np.allclose(observed.items[0].coords, cba_12.coords):
seen[0] = True
elif np.allclose(observed.items[0].coords, cba_21.coords):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
# TODO add random_order=False
def test_random_order_line_strings(self):
cba = ia.LineString([(0, 0), (1, 0), (1, 1)])
cba_12 = ia.LineString([(0, 0), (1, 0), ((1+1)*2, 1)])
cba_21 = ia.LineString([(0, 0), (1, 0), ((1*2)+1, 1)])
cbaoi = ia.LineStringsOnImage([cba], shape=(3, 3))
def func1(line_strings_on_images, random_state, parents, hooks):
for cbaoi_ in line_strings_on_images:
for cba_ in cbaoi_.items:
cba_.coords[-1, 0] += 1
return line_strings_on_images
def func2(line_strings_on_images, random_state, parents, hooks):
for cbaoi_ in line_strings_on_images:
for cba_ in cbaoi_.items:
cba_.coords[-1, 0] *= 2
return line_strings_on_images
aug_1 = iaa.Lambda(func_line_strings=func1)
aug_2 = iaa.Lambda(func_line_strings=func2)
seq = iaa.Sequential([aug_1, aug_2], random_order=True)
seen = [False, False]
for _ in sm.xrange(100):
observed = seq.augment_line_strings(cbaoi)
if np.allclose(observed.items[0].coords, cba_12.coords):
seen[0] = True
elif np.allclose(observed.items[0].coords, cba_21.coords):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
# TODO add random_order=False
def test_random_order_bounding_boxes(self):
bbs = [ia.BoundingBox(x1=1, y1=2, x2=30, y2=40)]
bbs_12 = [ia.BoundingBox(x1=(1+1)*2, y1=2, x2=30, y2=40)]
bbs_21 = [ia.BoundingBox(x1=(1*2)+1, y1=2, x2=30, y2=40)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(3, 3))
bbsoi_12 = ia.BoundingBoxesOnImage(bbs_12, shape=(3, 3))
bbsoi_21 = ia.BoundingBoxesOnImage(bbs_21, shape=(3, 3))
def func1(bounding_boxes_on_images, random_state, parents, hooks):
for bbsoi in bounding_boxes_on_images:
for bb in bbsoi.bounding_boxes:
bb.x1 += 1
return bounding_boxes_on_images
def func2(bounding_boxes_on_images, random_state, parents, hooks):
for bbsoi in bounding_boxes_on_images:
for bb in bbsoi.bounding_boxes:
bb.x1 *= 2
return bounding_boxes_on_images
aug_1 = iaa.Lambda(func_bounding_boxes=func1)
aug_2 = iaa.Lambda(func_bounding_boxes=func2)
seq = iaa.Sequential([aug_1, aug_2], random_order=True)
seen = [False, False]
for _ in sm.xrange(100):
observed = seq.augment_bounding_boxes(bbsoi)
if np.allclose(observed.to_xyxy_array(),
bbsoi_12.to_xyxy_array()):
seen[0] = True
elif np.allclose(observed.to_xyxy_array(),
bbsoi_21.to_xyxy_array()):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for random_order in [False, True]:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Sequential([iaa.Identity()],
random_order=random_order)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
for random_order in [False, True]:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Sequential([iaa.Identity()],
random_order=random_order)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_add_to_empty_sequential(self):
aug = iaa.Sequential()
aug.add(iaa.Fliplr(1.0))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(image))
def test_add_to_sequential_with_child(self):
aug = iaa.Sequential(iaa.Fliplr(1.0))
aug.add(iaa.Flipud(1.0))
image = np.arange(4*4).reshape((4, 4)).astype(np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, np.fliplr(np.flipud(image)))
def test_get_parameters(self):
aug1 = iaa.Sequential(iaa.Fliplr(1.0), random_order=False)
aug2 = iaa.Sequential(iaa.Fliplr(1.0), random_order=True)
assert aug1.get_parameters() == [False]
assert aug2.get_parameters() == [True]
def test_get_children_lists(self):
flip = iaa.Fliplr(1.0)
aug = iaa.Sequential(flip)
assert aug.get_children_lists() == [aug]
def test_to_deterministic(self):
child = iaa.Identity()
aug = iaa.Sequential([child])
aug_det = aug.to_deterministic()
assert aug_det.random_state is not aug.random_state
assert aug_det.deterministic
assert aug_det[0].deterministic
def test___str___and___repr__(self):
flip = iaa.Fliplr(1.0)
aug = iaa.Sequential(flip, random_order=True)
expected = (
"Sequential("
"name=%s, random_order=%s, children=[%s], deterministic=%s"
")" % (aug.name, "True", str(flip), "False")
)
assert aug.__str__() == aug.__repr__() == expected
def test_other_dtypes_noop__bool(self):
for random_order in [False, True]:
aug = iaa.Sequential([
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == image)
def test_other_dtypes__noop__uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype, random_order in itertools.product(dtypes, [False, True]):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.Sequential([
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_noop__float(self):
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for random_order in [False, True]:
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.Sequential([
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_flips__bool(self):
for random_order in [False, True]:
# note that we use 100% probabilities with square images here,
# so random_order does not influence the output
aug = iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
], random_order=random_order)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
expected = np.zeros((3, 3), dtype=bool)
expected[2, 2] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == "bool"
assert np.all(image_aug == expected)
def test_other_dtypes__flips__uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype, random_order in itertools.product(dtypes, [False, True]):
with self.subTest(dtype=dtype, random_order=random_order):
# note that we use 100% probabilities with square images here,
# so random_order does not influence the output
aug = iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
], random_order=random_order)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = np.zeros((3, 3), dtype=dtype)
expected[2, 2] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, expected)
def test_other_dtypes_flips__float(self):
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for random_order in [False, True]:
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype, random_order=random_order):
# note that we use 100% probabilities with square images
# here, so random_order does not influence the output
aug = iaa.Sequential([
iaa.Fliplr(1.0),
iaa.Flipud(1.0)
], random_order=random_order)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = np.zeros((3, 3), dtype=dtype)
expected[2, 2] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == expected)
def test_pickleable(self):
aug = iaa.Sequential(
[iaa.Add(1, seed=1),
iaa.Multiply(3, seed=2)],
random_order=True,
seed=3)
runtest_pickleable_uint8_img(aug, iterations=5)
class TestSomeOf(unittest.TestCase):
def setUp(self):
reseed()
def test_children_are_empty_list(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
aug = iaa.SomeOf(n=0, children=[])
observed = aug.augment_image(zeros)
assert np.array_equal(observed, zeros)
def test_children_are_not_provided(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
aug = iaa.SomeOf(n=0)
observed = aug.augment_image(zeros)
assert np.array_equal(observed, zeros)
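# each Add(k) child adds k to all 9 pixels of the 3x3x1 zero image, so the
# pixel sum directly encodes which subset of children was applied
# (e.g. n=2 can only yield 9*1+9*2, 9*1+9*3 or 9*2+9*3)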
def test_several_children_and_various_fixed_n(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
children = [iaa.Add(1), iaa.Add(2), iaa.Add(3)]
ns = [0, 1, 2, 3, 4, None, (2, None), (2, 2),
iap.Deterministic(3)]
expecteds = [[0], # 0
[9*1, 9*2, 9*3], # 1
[9*1+9*2, 9*1+9*3, 9*2+9*3], # 2
[9*1+9*2+9*3], # 3
[9*1+9*2+9*3], # 4
[9*1+9*2+9*3], # None
[9*1+9*2, 9*1+9*3, 9*2+9*3, 9*1+9*2+9*3], # (2, None)
[9*1+9*2, 9*1+9*3, 9*2+9*3], # (2, 2)
[9*1+9*2+9*3]] # Deterministic(3)
for n, expected in zip(ns, expecteds):
with self.subTest(n=n):
aug = iaa.SomeOf(n=n, children=children)
observed = aug.augment_image(zeros)
assert np.sum(observed) in expected
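# the children add powers of two, so the pixel value is a bitmask of the
# applied children; with n drawn uniformly from {0, 1, 2, 3} we expect no
# child in ~25% of the iterations and each individual child in ~50%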
def test_several_children_and_n_as_tuple(self):
zeros = np.zeros((1, 1, 1), dtype=np.uint8)
augs = [iaa.Add(2**0), iaa.Add(2**1), iaa.Add(2**2)]
aug = iaa.SomeOf(n=(0, 3), children=augs)
nb_iterations = 1000
nb_observed = [0, 0, 0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(zeros)
s = observed[0, 0, 0]
if s == 0:
nb_observed[0] += 1
else:
if s & 2**0 > 0:
nb_observed[1] += 1
if s & 2**1 > 0:
nb_observed[2] += 1
if s & 2**2 > 0:
nb_observed[3] += 1
p_observed = [n/nb_iterations for n in nb_observed]
assert np.isclose(p_observed[0], 0.25, rtol=0, atol=0.1)
assert np.isclose(p_observed[1], 0.5, rtol=0, atol=0.1)
assert np.isclose(p_observed[2], 0.5, rtol=0, atol=0.1)
assert np.isclose(p_observed[3], 0.5, rtol=0, atol=0.1)
def test_several_children_and_various_fixed_n__heatmaps(self):
augs = [iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": 1})]
heatmaps_arr = np.float32([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr0 = np.float32([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0]])
heatmaps_arr1 = np.float32([[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0]])
heatmaps_arr2 = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0]])
heatmaps_arr3 = np.float32([[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]])
ns = [0, 1, 2, 3, None]
expecteds = [[heatmaps_arr0],
[heatmaps_arr1],
[heatmaps_arr2],
[heatmaps_arr3],
[heatmaps_arr3]]
for n, expected in zip(ns, expecteds):
with self.subTest(n=n):
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
aug = iaa.SomeOf(n=n, children=augs)
observed = aug.augment_heatmaps(heatmaps)
assert observed.shape == (3, 3, 3)
assert np.isclose(observed.min_value, 0.0)
assert np.isclose(observed.max_value, 1.0)
matches = [
np.allclose(observed.get_arr(), expected_i)
for expected_i in expected]
assert np.any(matches)
def test_several_children_and_various_fixed_n__segmaps(self):
augs = [iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"x": 1})]
segmaps_arr = np.int32([[1, 0, 0],
[1, 0, 0],
[1, 0, 0]])
segmaps_arr0 = np.int32([[1, 0, 0],
[1, 0, 0],
[1, 0, 0]])
segmaps_arr1 = np.int32([[0, 1, 0],
[0, 1, 0],
[0, 1, 0]])
segmaps_arr2 = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 0, 1]])
segmaps_arr3 = np.int32([[0, 0, 0],
[0, 0, 0],
[0, 0, 0]])
ns = [0, 1, 2, 3, None]
expecteds = [[segmaps_arr0],
[segmaps_arr1],
[segmaps_arr2],
[segmaps_arr3],
[segmaps_arr3]]
for n, expected in zip(ns, expecteds):
with self.subTest(n=n):
segmaps = SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
aug = iaa.SomeOf(n=n, children=augs)
observed = aug.augment_segmentation_maps(segmaps)
assert observed.shape == (3, 3, 3)
matches = [
np.array_equal(observed.get_arr(), expected_i)
for expected_i in expected]
assert np.any(matches)
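# shared helper for keypoints/polygons/line strings/bounding boxes: with two
# translation children, n=0 leaves the input untouched, n=1 shifts by x or y,
# and n=2 (or None) shifts by both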
def _test_several_children_and_various_fixed_n__cbaois(
self, cbaoi, augf_name):
augs = [iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"y": 1})]
cbaoi_x = cbaoi.shift(x=1)
cbaoi_y = cbaoi.shift(y=1)
cbaoi_xy = cbaoi.shift(x=1, y=1)
ns = [0, 1, 2, None]
expecteds = [[cbaoi],
[cbaoi_x, cbaoi_y],
[cbaoi_xy],
[cbaoi_xy]]
for n, expected in zip(ns, expecteds):
with self.subTest(n=n):
aug = iaa.SomeOf(n=n, children=augs)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
cba = cbaoi_aug.items[0]
assert len(cbaoi_aug.items) == len(cbaoi.items)
assert cbaoi_aug.shape == (5, 6, 3)
if hasattr(cba, "is_valid"):
assert cba.is_valid
matches = [
cba.coords_almost_equals(cbaoi_i.items[0])
for cbaoi_i in expected
]
assert np.any(matches)
def test_several_children_and_various_fixed_n__keypoints(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 6, 3))
self._test_several_children_and_various_fixed_n__cbaois(
kpsoi, "augment_keypoints")
def test_several_children_and_various_fixed_n__polygons(self):
ps = [ia.Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])]
psoi = ia.PolygonsOnImage(ps, shape=(5, 6, 3))
self._test_several_children_and_various_fixed_n__cbaois(
psoi, "augment_polygons")
def test_several_children_and_various_fixed_n__line_strings(self):
ls = [ia.LineString([(0, 0), (3, 0), (3, 3), (0, 3)])]
lsoi = ia.LineStringsOnImage(ls, shape=(5, 6, 3))
self._test_several_children_and_various_fixed_n__cbaois(
lsoi, "augment_line_strings")
def test_several_children_and_various_fixed_n__bounding_boxes(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=3, y2=3)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(5, 6, 3))
self._test_several_children_and_various_fixed_n__cbaois(
bbsoi, "augment_bounding_boxes")
@classmethod
def _test_empty_cbaoi(cls, cbaoi, augf_name):
augs = [iaa.Affine(translate_px={"x": 1}),
iaa.Affine(translate_px={"y": 1})]
aug = iaa.SomeOf(n=2, children=augs)
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(cbaoi_aug, cbaoi)
def test_empty_keypoints_on_image_instance(self):
kpsoi = ia.KeypointsOnImage([], shape=(5, 6, 3))
self._test_empty_cbaoi(kpsoi, "augment_keypoints")
def test_empty_polygons_on_image_instance(self):
psoi = ia.PolygonsOnImage([], shape=(5, 6, 3))
self._test_empty_cbaoi(psoi, "augment_polygons")
def test_empty_line_strings_on_image_instance(self):
lsoi = ia.LineStringsOnImage([], shape=(5, 6, 3))
self._test_empty_cbaoi(lsoi, "augment_line_strings")
def test_empty_bounding_boxes_on_image_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(5, 6, 3))
self._test_empty_cbaoi(bbsoi, "augment_bounding_boxes")
def test_random_order_false__images(self):
augs = [iaa.Multiply(2.0), iaa.Add(100)]
aug = iaa.SomeOf(n=2, children=augs, random_order=False)
p_observed = self._test_random_order(aug, 10)
assert np.isclose(p_observed[0], 1.0, rtol=0, atol=1e-8)
assert np.isclose(p_observed[1], 0.0, rtol=0, atol=1e-8)
def test_random_order_true__images(self):
augs = [iaa.Multiply(2.0), iaa.Add(100)]
aug = iaa.SomeOf(n=2, children=augs, random_order=True)
p_observed = self._test_random_order(aug, 300)
assert np.isclose(p_observed[0], 0.5, rtol=0, atol=0.15)
assert np.isclose(p_observed[1], 0.5, rtol=0, atol=0.15)
@classmethod
def _test_random_order(cls, aug, nb_iterations):
# start from a single pixel with value 1 (not 0) so that the two possible
# application orders of Multiply(2.0) and Add(100) give distinguishable
# sums: 1*2 + 100 = 102 vs (1+100)*2 = 202
ones = np.ones((1, 1, 1), dtype=np.uint8)
nb_observed = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(ones)
s = np.sum(observed)
if s == (1*2)+100:
nb_observed[0] += 1
elif s == (1+100)*2:
nb_observed[1] += 1
else:
raise Exception("Unexpected sum: %.8f (@2)" % (s,))
p_observed = [n/nb_iterations for n in nb_observed]
return p_observed
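# helper: augments the image and the coordinate-based augmentable with the
# same deterministic augmenter instance and checks that both received the
# same (possibly empty) subset of translations, i.e. that they stay aligned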
@classmethod
def _test_images_and_cbaoi_aligned(cls, cbaoi, augf_name):
img = np.zeros((3, 3), dtype=np.uint8)
img_x = np.copy(img)
img_y = np.copy(img)
img_xy = np.copy(img)
img[1, 1] = 255
img_x[1, 2] = 255
img_y[2, 1] = 255
img_xy[2, 2] = 255
augs = [
iaa.Affine(translate_px={"x": 1}, order=0),
iaa.Affine(translate_px={"y": 1}, order=0)
]
cbaoi_x = cbaoi.shift(x=1)
cbaoi_y = cbaoi.shift(y=1)
cbaoi_xy = cbaoi.shift(x=1, y=1)
aug = iaa.SomeOf((0, 2), children=augs)
seen = [False, False, False, False]
for _ in sm.xrange(100):
aug_det = aug.to_deterministic()
img_aug = aug_det.augment_image(img)
cbaoi_aug = getattr(aug_det, augf_name)(cbaoi)
if np.array_equal(img_aug, img):
assert_cbaois_equal(cbaoi_aug, cbaoi)
seen[0] = True
elif np.array_equal(img_aug, img_x):
assert_cbaois_equal(cbaoi_aug, cbaoi_x)
seen[1] = True
elif np.array_equal(img_aug, img_y):
assert_cbaois_equal(cbaoi_aug, cbaoi_y)
seen[2] = True
elif np.array_equal(img_aug, img_xy):
assert_cbaois_equal(cbaoi_aug, cbaoi_xy)
seen[3] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_images_and_keypoints_aligned(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 6, 3))
self._test_images_and_cbaoi_aligned(kpsoi, "augment_keypoints")
def test_images_and_polygons_aligned(self):
ps = [ia.Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])]
psoi = ia.PolygonsOnImage(ps, shape=(5, 6, 3))
self._test_images_and_cbaoi_aligned(psoi, "augment_polygons")
def test_images_and_line_strings_aligned(self):
ls = [ia.LineString([(0, 0), (3, 0), (3, 3), (0, 3)])]
lsoi = ia.LineStringsOnImage(ls, shape=(5, 6, 3))
self._test_images_and_cbaoi_aligned(lsoi, "augment_line_strings")
def test_images_and_bounding_boxes_aligned(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=3, y2=3)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(5, 6, 3))
self._test_images_and_cbaoi_aligned(bbsoi, "augment_bounding_boxes")
def test_invalid_argument_as_children(self):
got_exception = False
try:
_ = iaa.SomeOf(1, children=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_invalid_datatype_as_n(self):
got_exception = False
try:
_ = iaa.SomeOf(False, children=iaa.Fliplr(1.0))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_invalid_tuple_as_n(self):
got_exception = False
try:
_ = iaa.SomeOf((2, "test"), children=iaa.Fliplr(1.0))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_invalid_none_none_tuple_as_n(self):
got_exception = False
try:
_ = iaa.SomeOf((None, None), children=iaa.Fliplr(1.0))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_with_children_that_change_shapes_keep_size_false(self):
# test for https://github.com/aleju/imgaug/issues/143
# (shapes change in child augmenters, leading to problems if input
# arrays are assumed to stay input arrays)
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.SomeOf(1, [
iaa.Crop((2, 0, 2, 0), keep_size=False),
iaa.Crop((1, 0, 1, 0), keep_size=False)
])
expected_shapes = [(4, 8, 3), (6, 8, 3)]
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image] * 4))
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images([image] * 4)
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
assert observed.shape in expected_shapes
def test_with_children_that_change_shapes_keep_size_true(self):
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.SomeOf(1, [
iaa.Crop((2, 0, 2, 0), keep_size=True),
iaa.Crop((1, 0, 1, 0), keep_size=True)
])
expected_shapes = [(8, 8, 3)]
for _ in sm.xrange(10):
observed = aug.augment_images(np.uint8([image] * 4))
assert ia.is_np_array(observed)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images([image] * 4)
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_images([image])
assert isinstance(observed, list)
assert np.all([img.shape in expected_shapes for img in observed])
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
assert observed.shape in expected_shapes
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
for random_order in [False, True]:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.SomeOf(
1, [iaa.Identity()], random_order=random_order)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
for random_order in [False, True]:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.SomeOf(
1, [iaa.Identity()], random_order=random_order)
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_other_dtypes_via_noop__bool(self):
for random_order in [False, True]:
with self.subTest(random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Identity(),
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug == image)
def test_other_dtypes_via_noop__uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
random_orders = [False, True]
for dtype, random_order in itertools.product(dtypes, random_orders):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Identity(),
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_via_noop__float(self):
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
random_orders = [False, True]
for random_order in random_orders:
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Identity(),
iaa.Identity(),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_via_flip__bool(self):
for random_order in [False, True]:
with self.subTest(random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
expected = [np.zeros((3, 3), dtype=bool)
for _ in sm.xrange(3)]
expected[0][0, 2] = True
expected[1][2, 0] = True
expected[2][2, 2] = True
for _ in sm.xrange(10):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert any([np.all(image_aug == expected_i)
for expected_i in expected])
def test_other_dtypes_via_flip__uint_int(self):
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
random_orders = [False, True]
for dtype, random_order in itertools.product(dtypes, random_orders):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Identity()
], random_order=random_order)
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = [np.zeros((3, 3), dtype=dtype)
for _ in sm.xrange(3)]
expected[0][0, 2] = value
expected[1][2, 0] = value
expected[2][2, 2] = value
for _ in sm.xrange(10):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert any([np.all(image_aug == expected_i)
for expected_i in expected])
def test_other_dtypes_via_flip__float(self):
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
random_orders = [False, True]
for random_order in random_orders:
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype, random_order=random_order):
aug = iaa.SomeOf(2, [
iaa.Fliplr(1.0),
iaa.Flipud(1.0),
iaa.Identity()
], random_order=random_order)
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = [np.zeros((3, 3), dtype=dtype)
for _ in sm.xrange(3)]
expected[0][0, 2] = value
expected[1][2, 0] = value
expected[2][2, 2] = value
for _ in sm.xrange(10):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert any([np.all(image_aug == expected_i)
for expected_i in expected])
def test_pickleable(self):
aug = iaa.SomeOf((0, 3),
[iaa.Add(1, seed=1),
iaa.Add(2, seed=2),
iaa.Multiply(1.5, seed=3),
iaa.Multiply(2.0, seed=4)],
random_order=True,
seed=5)
runtest_pickleable_uint8_img(aug, iterations=5)
def test_get_children_lists(self):
child = iaa.Identity()
aug = iaa.SomeOf(1, [child])
children_lsts = aug.get_children_lists()
assert len(children_lsts) == 1
assert len(children_lsts[0]) == 1
assert children_lsts[0][0] is child
def test_to_deterministic(self):
child = iaa.Identity()
aug = iaa.SomeOf(1, [child])
aug_det = aug.to_deterministic()
assert aug_det.random_state is not aug.random_state
assert aug_det.deterministic
assert aug_det[0].deterministic
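# Tests for iaa.OneOf: OneOf is expected to behave like SomeOf(1, ...), i.e.
# exactly one of its children is applied to each image.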
class TestOneOf(unittest.TestCase):
def setUp(self):
reseed()
def test_returns_someof(self):
child = iaa.Identity()
aug = iaa.OneOf(children=child)
assert isinstance(aug, iaa.SomeOf)
assert aug.n == 1
assert aug[0] is child
def test_single_child_that_is_augmenter(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
aug = iaa.OneOf(children=iaa.Add(1))
observed = aug.augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
def test_single_child_that_is_sequential(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
aug = iaa.OneOf(children=iaa.Sequential([iaa.Add(1)]))
observed = aug.augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
def test_single_child_that_is_list(self):
zeros = np.zeros((3, 3, 1), dtype=np.uint8)
aug = iaa.OneOf(children=[iaa.Add(1)])
observed = aug.augment_image(zeros)
assert np.array_equal(observed, zeros + 1)
def test_three_children(self):
zeros = np.zeros((1, 1, 1), dtype=np.uint8)
augs = [iaa.Add(1), iaa.Add(2), iaa.Add(3)]
aug = iaa.OneOf(augs)
results = {1: 0, 2: 0, 3: 0}
nb_iterations = 1000
for _ in sm.xrange(nb_iterations):
result = aug.augment_image(zeros)
s = int(np.sum(result))
results[s] += 1
expected = int(nb_iterations / len(augs))
tolerance = int(nb_iterations * 0.05)
for key, val in results.items():
assert np.isclose(val, expected, rtol=0, atol=tolerance)
assert len(list(results.keys())) == 3
def test_pickleable(self):
aug = iaa.OneOf(
[iaa.Add(1, seed=1),
iaa.Add(10, seed=2),
iaa.Multiply(2.0, seed=3)],
seed=4)
runtest_pickleable_uint8_img(aug, iterations=5)
def test_get_children_lists(self):
child = iaa.Identity()
aug = iaa.OneOf([child])
children_lsts = aug.get_children_lists()
assert len(children_lsts) == 1
assert len(children_lsts[0]) == 1
assert children_lsts[0][0] is child
def test_to_deterministic(self):
child = iaa.Identity()
aug = iaa.OneOf([child])
aug_det = aug.to_deterministic()
assert aug_det.random_state is not aug.random_state
assert aug_det.deterministic
assert aug_det[0].deterministic
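# Tests for iaa.Sometimes: with probability p the then_list branch is applied,
# otherwise the else_list branch (or, when no else_list is given, the input is
# left unchanged).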
class TestSometimes(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
image = np.array([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image)
@property
def images(self):
return np.uint8([self.image])
@property
def image_lr(self):
image_lr = np.array([[1, 1, 0],
[1, 0, 0],
[1, 0, 0]], dtype=np.uint8) * 255
return np.atleast_3d(image_lr)
@property
def images_lr(self):
return np.uint8([self.image_lr])
@property
def image_ud(self):
image_ud = np.array([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]], dtype=np.uint8) * 255
return np.atleast_3d(image_ud)
@property
def images_ud(self):
return np.uint8([self.image_ud])
@property
def keypoints(self):
keypoints = [ia.Keypoint(x=1, y=0),
ia.Keypoint(x=2, y=0),
ia.Keypoint(x=2, y=1)]
return ia.KeypointsOnImage(keypoints, shape=self.image.shape)
@property
def keypoints_lr(self):
keypoints = [ia.Keypoint(x=3-1, y=0),
ia.Keypoint(x=3-2, y=0),
ia.Keypoint(x=3-2, y=1)]
return ia.KeypointsOnImage(keypoints, shape=self.image.shape)
@property
def keypoints_ud(self):
keypoints = [ia.Keypoint(x=1, y=3-0),
ia.Keypoint(x=2, y=3-0),
ia.Keypoint(x=2, y=3-1)]
return ia.KeypointsOnImage(keypoints, shape=self.image.shape)
@property
def polygons(self):
polygons = [ia.Polygon([(0, 0), (2, 0), (2, 2)])]
return ia.PolygonsOnImage(polygons, shape=self.image.shape)
@property
def polygons_lr(self):
polygons = [ia.Polygon([(3-0, 0), (3-2, 0), (3-2, 2)])]
return ia.PolygonsOnImage(polygons, shape=self.image.shape)
@property
def polygons_ud(self):
polygons = [ia.Polygon([(0, 3-0), (2, 3-0), (2, 3-2)])]
return ia.PolygonsOnImage(polygons, shape=self.image.shape)
@property
def lsoi(self):
lss = [ia.LineString([(0, 0), (2, 0), (2, 2)])]
return ia.LineStringsOnImage(lss, shape=self.image.shape)
@property
def lsoi_lr(self):
lss = [ia.LineString([(3-0, 0), (3-2, 0), (3-2, 2)])]
return ia.LineStringsOnImage(lss, shape=self.image.shape)
@property
def lsoi_ud(self):
lss = [ia.LineString([(0, 3-0), (2, 3-0), (2, 3-2)])]
return ia.LineStringsOnImage(lss, shape=self.image.shape)
@property
def bbsoi(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=1.5, y2=1.0)]
return ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
@property
def bbsoi_lr(self):
x1 = 3-0
y1 = 0
x2 = 3-1.5
y2 = 1.0
bbs = [ia.BoundingBox(x1=min([x1, x2]), y1=min([y1, y2]),
x2=max([x1, x2]), y2=max([y1, y2]))]
return ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
@property
def bbsoi_ud(self):
x1 = 0
y1 = 3-0
x2 = 1.5
y2 = 3-1.0
bbs = [ia.BoundingBox(x1=min([x1, x2]), y1=min([y1, y2]),
x2=max([x1, x2]), y2=max([y1, y2]))]
return ia.BoundingBoxesOnImage(bbs, shape=self.image.shape)
@property
def heatmaps(self):
heatmaps_arr = np.float32([[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
@property
def heatmaps_lr(self):
heatmaps_arr = np.float32([[1.0, 0.0, 0.0],
[1.0, 0.0, 0.0],
[1.0, 1.0, 0.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
@property
def heatmaps_ud(self):
heatmaps_arr = np.float32([[0.0, 1.0, 1.0],
[0.0, 0.0, 1.0],
[0.0, 0.0, 1.0]])
return ia.HeatmapsOnImage(heatmaps_arr, shape=(3, 3, 3))
@property
def segmaps(self):
segmaps_arr = np.int32([[0, 0, 1],
[0, 0, 1],
[0, 1, 1]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
@property
def segmaps_lr(self):
segmaps_arr = np.int32([[1, 0, 0],
[1, 0, 0],
[1, 1, 0]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
@property
def segmaps_ud(self):
segmaps_arr = np.int32([[0, 1, 1],
[0, 0, 1],
[0, 0, 1]])
return ia.SegmentationMapsOnImage(segmaps_arr, shape=(3, 3, 3))
def test_two_branches_always_first__images(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_lr)
def test_two_branches_always_first__images__deterministic(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_lr)
def test_two_branches_always_first__images__list(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_images([self.images[0]])
assert array_equal_lists(observed, [self.images_lr[0]])
def test_two_branches_always_first__images__deterministic__list(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.images[0]])
assert array_equal_lists(observed, [self.images_lr[0]])
def test_two_branches_always_first__keypoints(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_keypoints(self.keypoints)
assert keypoints_equal(observed, self.keypoints_lr)
def test_two_branches_always_first__keypoints__deterministic(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_keypoints(self.keypoints)
assert_cbaois_equal(observed, self.keypoints_lr)
def test_two_branches_always_first__polygons(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_polygons([self.polygons])
assert_cbaois_equal(observed, [self.polygons_lr])
def test_two_branches_always_first__polygons__deterministic(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_polygons([self.polygons])
assert_cbaois_equal(observed, [self.polygons_lr])
def test_two_branches_always_first__line_strings(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_line_strings([self.lsoi])
assert_cbaois_equal(observed, [self.lsoi_lr])
def test_two_branches_always_first__line_strings__deterministic(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_line_strings([self.lsoi])
assert_cbaois_equal(observed, [self.lsoi_lr])
def test_two_branches_always_first__bounding_boxes(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_bounding_boxes([self.bbsoi])
assert_cbaois_equal(observed, [self.bbsoi_lr])
def test_two_branches_always_first__bounding_boxes__deterministic(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_bounding_boxes([self.bbsoi])
assert_cbaois_equal(observed, [self.bbsoi_lr])
def test_two_branches_always_first__heatmaps(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_heatmaps([self.heatmaps])[0]
assert observed.shape == self.heatmaps.shape
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), self.heatmaps_lr.get_arr())
def test_two_branches_always_first__segmaps(self):
aug = iaa.Sometimes(1.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == self.segmaps.shape
assert np.array_equal(observed.get_arr(), self.segmaps_lr.get_arr())
def test_two_branches_always_second__images(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_images(self.images)
assert np.array_equal(observed, self.images_ud)
def test_two_branches_always_second__images__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_images(self.images)
assert np.array_equal(observed, self.images_ud)
def test_two_branches_always_second__images__list(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_images([self.images[0]])
assert array_equal_lists(observed, [self.images_ud[0]])
def test_two_branches_always_second__images__list__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_images([self.images[0]])
assert array_equal_lists(observed, [self.images_ud[0]])
def test_two_branches_always_second__keypoints(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed[0], self.keypoints_ud)
def test_two_branches_always_second__keypoints__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_keypoints([self.keypoints])
assert_cbaois_equal(observed[0], self.keypoints_ud)
def test_two_branches_always_second__polygons(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_ud)
def test_two_branches_always_second__polygons__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_polygons(self.polygons)
assert_cbaois_equal(observed, self.polygons_ud)
def test_two_branches_always_second__line_strings(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_ud)
def test_two_branches_always_second__line_strings__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_line_strings(self.lsoi)
assert_cbaois_equal(observed, self.lsoi_ud)
def test_two_branches_always_second__bounding_boxes(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_ud)
def test_two_branches_always_second__bounding_boxes__deterministic(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
observed = aug_det.augment_bounding_boxes(self.bbsoi)
assert_cbaois_equal(observed, self.bbsoi_ud)
def test_two_branches_always_second__heatmaps(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_heatmaps(self.heatmaps)
assert observed.shape == self.heatmaps.shape
assert 0 - 1e-6 < observed.min_value < 0 + 1e-6
assert 1 - 1e-6 < observed.max_value < 1 + 1e-6
assert np.array_equal(observed.get_arr(), self.heatmaps_ud.get_arr())
def test_two_branches_always_second__segmaps(self):
aug = iaa.Sometimes(0.0, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
observed = aug.augment_segmentation_maps(self.segmaps)
assert observed.shape == self.segmaps.shape
assert np.array_equal(observed.get_arr(), self.segmaps_ud.get_arr())
def test_two_branches_both_50_percent__images(self):
aug = iaa.Sometimes(0.5, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
last_aug = None
nb_changed_aug = 0
nb_iterations = 500
nb_images_if_branch = 0
nb_images_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
if np.array_equal(observed_aug, self.images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, self.images_ud):
nb_images_else_branch += 1
else:
raise Exception(
"Received output doesnt match any expected output.")
p_if_branch = nb_images_if_branch / nb_iterations
p_else_branch = nb_images_else_branch / nb_iterations
p_changed = 1 - (nb_changed_aug / nb_iterations)
assert np.isclose(p_if_branch, 0.5, rtol=0, atol=0.1)
assert np.isclose(p_else_branch, 0.5, rtol=0, atol=0.1)
# should be the same in roughly 50% of all cases
assert np.isclose(p_changed, 0.5, rtol=0, atol=0.1)
def test_two_branches_both_50_percent__images__deterministic(self):
aug = iaa.Sometimes(0.5, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 20
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
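# Shared helper: augment a coordinate-based augmentable many times with
# Sometimes(0.5, Fliplr, Flipud) and verify that both branches are taken in
# roughly 50% of the iterations.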
@classmethod
def _test_two_branches_both_50_percent__cbaois(
cls, cbaoi, cbaoi_lr, cbaoi_ud, augf_name):
def _same_coords(cbaoi1, cbaoi2):
assert len(cbaoi1.items) == len(cbaoi2.items)
for i1, i2 in zip(cbaoi1.items, cbaoi2.items):
if not np.allclose(i1.coords, i2.coords, atol=1e-4, rtol=0):
return False
return True
aug = iaa.Sometimes(0.5, [iaa.Fliplr(1.0)], [iaa.Flipud(1.0)])
nb_iterations = 250
nb_if_branch = 0
nb_else_branch = 0
for i in sm.xrange(nb_iterations):
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
# use allclose() instead of coords_almost_equals() for efficiency
if _same_coords(cbaoi_aug, cbaoi_lr):
nb_if_branch += 1
elif _same_coords(cbaoi_aug, cbaoi_ud):
nb_else_branch += 1
else:
raise Exception(
"Received output doesnt match any expected output.")
p_if_branch = nb_if_branch / nb_iterations
p_else_branch = nb_else_branch / nb_iterations
assert np.isclose(p_if_branch, 0.5, rtol=0, atol=0.15)
assert np.isclose(p_else_branch, 0.5, rtol=0, atol=0.15)
def test_two_branches_both_50_percent__keypoints(self):
self._test_two_branches_both_50_percent__cbaois(
self.keypoints, self.keypoints_lr, self.keypoints_ud,
"augment_keypoints")
def test_two_branches_both_50_percent__polygons(self):
self._test_two_branches_both_50_percent__cbaois(
self.polygons, self.polygons_lr, self.polygons_ud,
"augment_polygons")
def test_two_branches_both_50_percent__line_strings(self):
self._test_two_branches_both_50_percent__cbaois(
self.lsoi, self.lsoi_lr, self.lsoi_ud,
"augment_line_strings")
def test_two_branches_both_50_percent__bounding_boxes(self):
self._test_two_branches_both_50_percent__cbaois(
self.bbsoi, self.bbsoi_lr, self.bbsoi_ud,
"augment_bounding_boxes")
def test_one_branch_50_percent__images(self):
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
last_aug = None
nb_changed_aug = 0
nb_iterations = 500
nb_images_if_branch = 0
nb_images_else_branch = 0
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(self.images)
if i == 0:
last_aug = observed_aug
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
last_aug = observed_aug
if np.array_equal(observed_aug, self.images_lr):
nb_images_if_branch += 1
elif np.array_equal(observed_aug, self.images):
nb_images_else_branch += 1
else:
raise Exception(
"Received output doesnt match any expected output.")
p_if_branch = nb_images_if_branch / nb_iterations
p_else_branch = nb_images_else_branch / nb_iterations
p_changed = 1 - (nb_changed_aug / nb_iterations)
assert np.isclose(p_if_branch, 0.5, rtol=0, atol=0.1)
assert np.isclose(p_else_branch, 0.5, rtol=0, atol=0.1)
# should be the same in roughly 50% of all cases
assert np.isclose(p_changed, 0.5, rtol=0, atol=0.1)
def test_one_branch_50_percent__images__deterministic(self):
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
aug_det = aug.to_deterministic()
last_aug_det = None
nb_changed_aug_det = 0
nb_iterations = 10
for i in sm.xrange(nb_iterations):
observed_aug_det = aug_det.augment_images(self.images)
if i == 0:
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug_det = observed_aug_det
assert nb_changed_aug_det == 0
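# Shared helper: same as above, but with only a then_list, so the else-case
# must leave the input unchanged.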
@classmethod
def _test_one_branch_50_percent__cbaois(
cls, cbaoi, cbaoi_lr, augf_name):
def _same_coords(cbaoi1, cbaoi2):
assert len(cbaoi1.items) == len(cbaoi2.items)
for i1, i2 in zip(cbaoi1.items, cbaoi2.items):
if not np.allclose(i1.coords, i2.coords, atol=1e-4, rtol=0):
return False
return True
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
nb_iterations = 250
nb_if_branch = 0
nb_else_branch = 0
for i in sm.xrange(nb_iterations):
cbaoi_aug = getattr(aug, augf_name)(cbaoi)
# use allclose() instead of coords_almost_equals() for efficiency
if _same_coords(cbaoi_aug, cbaoi_lr):
nb_if_branch += 1
elif _same_coords(cbaoi_aug, cbaoi):
nb_else_branch += 1
else:
raise Exception(
"Received output doesnt match any expected output.")
p_if_branch = nb_if_branch / nb_iterations
p_else_branch = nb_else_branch / nb_iterations
assert np.isclose(p_if_branch, 0.5, rtol=0, atol=0.15)
assert np.isclose(p_else_branch, 0.5, rtol=0, atol=0.15)
def test_one_branch_50_percent__keypoints(self):
self._test_one_branch_50_percent__cbaois(
self.keypoints, self.keypoints_lr, "augment_keypoints")
def test_one_branch_50_percent__polygons(self):
self._test_one_branch_50_percent__cbaois(
self.polygons, self.polygons_lr, "augment_polygons")
def test_one_branch_50_percent__bounding_boxes(self):
self._test_one_branch_50_percent__cbaois(
self.bbsoi, self.bbsoi_lr, "augment_bounding_boxes")
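# Shared helper: empty augmentables must pass through Sometimes unchanged.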
@classmethod
def _test_empty_cbaoi(cls, cbaoi, augf_name):
aug = iaa.Sometimes(0.5, iaa.Identity())
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
def test_empty_keypoints(self):
kpsoi = ia.KeypointsOnImage([], shape=(1, 2, 3))
self._test_empty_cbaoi(kpsoi, "augment_keypoints")
def test_empty_polygons(self):
psoi = ia.PolygonsOnImage([], shape=(1, 2, 3))
self._test_empty_cbaoi(psoi, "augment_polygons")
def test_empty_line_strings(self):
lsoi = ia.LineStringsOnImage([], shape=(1, 2, 3))
self._test_empty_cbaoi(lsoi, "augment_line_strings")
def test_empty_bounding_boxes(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(1, 2, 3))
self._test_empty_cbaoi(bbsoi, "augment_bounding_boxes")
def test_p_is_stochastic_parameter(self):
image = np.zeros((1, 1), dtype=np.uint8) + 100
images = [image] * 10
aug = iaa.Sometimes(
p=iap.Binomial(iap.Choice([0.0, 1.0])),
then_list=iaa.Add(10))
seen = [0, 0]
for _ in sm.xrange(100):
observed = aug.augment_images(images)
uq = np.unique(np.uint8(observed))
assert len(uq) == 1
if uq[0] == 100:
seen[0] += 1
elif uq[0] == 110:
seen[1] += 1
else:
assert False
assert seen[0] > 20
assert seen[1] > 20
def test_bad_datatype_for_p_fails(self):
got_exception = False
try:
_ = iaa.Sometimes(p="foo")
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_bad_datatype_for_then_list_fails(self):
got_exception = False
try:
_ = iaa.Sometimes(p=0.2, then_list=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_bad_datatype_for_else_list_fails(self):
got_exception = False
try:
_ = iaa.Sometimes(p=0.2, then_list=None, else_list=False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_two_branches_both_none(self):
aug = iaa.Sometimes(0.2, then_list=None, else_list=None)
image = np.random.randint(0, 255, size=(16, 16), dtype=np.uint8)
observed = aug.augment_image(image)
assert np.array_equal(observed, image)
def test_using_hooks_to_deactivate_propagation(self):
image = np.random.randint(0, 255-10, size=(16, 16), dtype=np.uint8)
aug = iaa.Sometimes(1.0, iaa.Add(10))
def _propagator(images, augmenter, parents, default):
return False if augmenter == aug else default
hooks = ia.HooksImages(propagator=_propagator)
observed1 = aug.augment_image(image)
observed2 = aug.augment_image(image, hooks=hooks)
assert np.array_equal(observed1, image + 10)
assert np.array_equal(observed2, image)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Sometimes(1.0, iaa.Identity())
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Sometimes(1.0, iaa.Identity())
image_aug = aug(image=image)
assert np.all(image_aug == 0)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_get_parameters(self):
aug = iaa.Sometimes(0.75)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert 0.75 - 1e-8 < params[0].p.value < 0.75 + 1e-8
def test___str___and___repr__(self):
then_list = iaa.Add(1)
else_list = iaa.Add(2)
aug = iaa.Sometimes(
0.5,
then_list=then_list,
else_list=else_list,
name="SometimesTest")
expected_p = "Binomial(Deterministic(float 0.50000000))"
expected_then_list = (
"Sequential("
"name=SometimesTest-then, "
"random_order=False, "
"children=[%s], "
"deterministic=False"
")" % (str(then_list),))
expected_else_list = (
"Sequential("
"name=SometimesTest-else, "
"random_order=False, "
"children=[%s], "
"deterministic=False"
")" % (str(else_list),))
expected = (
"Sometimes("
"p=%s, name=%s, then_list=%s, else_list=%s, deterministic=%s"
")" % (
expected_p,
"SometimesTest",
expected_then_list,
expected_else_list,
"False"))
observed_str = aug.__str__()
observed_repr = aug.__repr__()
assert observed_str == expected
assert observed_repr == expected
def test___str___and___repr___with_nones_as_children(self):
aug = iaa.Sometimes(
0.5,
then_list=None,
else_list=None,
name="SometimesTest")
expected_p = "Binomial(Deterministic(float 0.50000000))"
expected = (
"Sometimes("
"p=%s, "
"name=%s, "
"then_list=%s, "
"else_list=%s, "
"deterministic=%s"
")" % (
expected_p,
"SometimesTest",
"None",
"None",
"False"))
observed_str = aug.__str__()
observed_repr = aug.__repr__()
assert observed_str == expected
assert observed_repr == expected
def test_shapes_changed_by_children__no_keep_size_non_stochastic(self):
# Test for https://github.com/aleju/imgaug/issues/143
# (shapes change in child augmenters, leading to problems if input
# arrays are assumed to stay input arrays)
def _assert_all_valid_shapes(images):
expected_shapes = [(4, 8, 3), (6, 8, 3)]
assert np.all([img.shape in expected_shapes for img in images])
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop((2, 0, 2, 0), keep_size=False),
iaa.Crop((1, 0, 1, 0), keep_size=False)
)
for _ in sm.xrange(10):
observed = aug.augment_images(
np.uint8([image, image, image, image]))
assert isinstance(observed, list) or ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list) or ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
_assert_all_valid_shapes([observed])
def test_shapes_changed_by_children__no_keep_size_stochastic(self):
def _assert_all_valid_shapes(images):
assert np.all([
16 <= img.shape[0] <= 30
and img.shape[1:] == (32, 3) for img in images
])
image = np.zeros((32, 32, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=False),
iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=False)
)
for _ in sm.xrange(10):
observed = aug.augment_images(
np.uint8([image, image, image, image]))
assert isinstance(observed, list) or ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_images(np.uint8([image]))
assert isinstance(observed, list) or ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
_assert_all_valid_shapes([observed])
def test_shapes_changed_by_children__keep_size_non_stochastic(self):
def _assert_all_valid_shapes(images):
expected_shapes = [(8, 8, 3)]
assert np.all([img.shape in expected_shapes for img in images])
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop((2, 0, 2, 0), keep_size=True),
iaa.Crop((1, 0, 1, 0), keep_size=True)
)
for _ in sm.xrange(10):
observed = aug.augment_images(
np.uint8([image, image, image, image]))
assert ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
_assert_all_valid_shapes([observed])
def test_shapes_changed_by_children__keep_size_stochastic(self):
def _assert_all_valid_shapes(images):
# only one shape expected here despite stochastic crop ranges
# due to keep_size=True
expected_shapes = [(8, 8, 3)]
assert np.all([img.shape in expected_shapes for img in images])
image = np.zeros((8, 8, 3), dtype=np.uint8)
aug = iaa.Sometimes(
0.5,
iaa.Crop(((1, 4), 0, (1, 4), 0), keep_size=True),
iaa.Crop(((4, 8), 0, (4, 8), 0), keep_size=True)
)
for _ in sm.xrange(10):
observed = aug.augment_images(
np.uint8([image, image, image, image]))
assert ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image, image, image, image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_images(np.uint8([image]))
assert ia.is_np_array(observed)
_assert_all_valid_shapes(observed)
observed = aug.augment_images([image])
assert isinstance(observed, list)
_assert_all_valid_shapes(observed)
observed = aug.augment_image(image)
assert ia.is_np_array(observed)
_assert_all_valid_shapes([observed])
def test_other_dtypes_via_noop__bool(self):
aug = iaa.Sometimes(1.0, iaa.Identity())
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug == image)
def test_other_dtypes_via_noop__uint_int(self):
aug = iaa.Sometimes(1.0, iaa.Identity())
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, _center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_via_noop__float(self):
aug = iaa.Sometimes(1.0, iaa.Identity())
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_via_flip__bool(self):
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
image = np.zeros((3, 3), dtype=bool)
image[0, 0] = True
expected = [np.zeros((3, 3), dtype=bool) for _ in sm.xrange(2)]
expected[0][0, 2] = True
expected[1][2, 0] = True
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_other_dtypes_via_flip__uint_int(self):
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, _center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = [np.zeros((3, 3), dtype=dtype) for _ in sm.xrange(2)]
expected[0][0, 2] = value
expected[1][2, 0] = value
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_other_dtypes_via_flip__float(self):
aug = iaa.Sometimes(0.5, iaa.Fliplr(1.0), iaa.Flipud(1.0))
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3), dtype=dtype)
image[0, 0] = value
expected = [np.zeros((3, 3), dtype=dtype) for _ in sm.xrange(2)]
expected[0][0, 2] = value
expected[1][2, 0] = value
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_pickleable(self):
aug = iaa.Sometimes(0.5, iaa.Add(10), [iaa.Add(1), iaa.Multiply(2.0)],
seed=1)
runtest_pickleable_uint8_img(aug, iterations=5)
def test_get_children_lists(self):
child = iaa.Identity()
aug = iaa.Sometimes(0.5, [child])
children_lsts = aug.get_children_lists()
assert len(children_lsts) == 1
assert len(children_lsts[0]) == 1
assert children_lsts[0][0] is child
def test_get_children_lists_both_lists(self):
child = iaa.Identity()
child2 = iaa.Identity()
aug = iaa.Sometimes(0.5, [child], [child2])
children_lsts = aug.get_children_lists()
assert len(children_lsts) == 2
assert len(children_lsts[0]) == 1
assert len(children_lsts[1]) == 1
assert children_lsts[0][0] is child
assert children_lsts[1][0] is child2
def test_to_deterministic(self):
child = iaa.Identity()
child2 = iaa.Identity()
aug = iaa.Sometimes(0.5, [child], [child2])
aug_det = aug.to_deterministic()
assert aug_det.deterministic
assert aug_det.random_state is not aug.random_state
assert aug_det.then_list[0].deterministic
assert aug_det.else_list[0].deterministic
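# Tests for iaa.WithChannels: child augmenters are applied only to the selected
# image channels; the tests below also cover how heatmaps, segmentation maps and
# coordinate-based augmentables are handled for different channel selections.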
class TestWithChannels(unittest.TestCase):
def setUp(self):
reseed()
@property
def image(self):
base_img = np.zeros((3, 3, 2), dtype=np.uint8)
base_img[..., 0] += 100
base_img[..., 1] += 200
return base_img
def test_augment_only_channel_0(self):
aug = iaa.WithChannels(0, iaa.Add(10))
observed = aug.augment_image(self.image)
expected = self.image
expected[..., 0] += 10
assert np.allclose(observed, expected)
def test_augment_only_channel_1(self):
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_image(self.image)
expected = self.image
expected[..., 1] += 10
assert np.allclose(observed, expected)
def test_augment_all_channels_via_none(self):
aug = iaa.WithChannels(None, iaa.Add(10))
observed = aug.augment_image(self.image)
expected = self.image + 10
assert np.allclose(observed, expected)
def test_augment_channels_0_and_1_via_list(self):
aug = iaa.WithChannels([0, 1], iaa.Add(10))
observed = aug.augment_image(self.image)
expected = self.image + 10
assert np.allclose(observed, expected)
def test_apply_multiple_augmenters(self):
image = np.zeros((3, 3, 2), dtype=np.uint8)
image[..., 0] += 5
image[..., 1] += 10
aug = iaa.WithChannels(1, [iaa.Add(10), iaa.Multiply(2.0)])
observed = aug.augment_image(image)
expected = np.copy(image)
expected[..., 1] += 10
expected[..., 1] *= 2
assert np.allclose(observed, expected)
def test_multiple_images_given_as_array(self):
images = np.concatenate([
self.image[np.newaxis, ...],
self.image[np.newaxis, ...]],
axis=0)
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_images(images)
expected = np.copy(images)
expected[..., 1] += 10
assert np.allclose(observed, expected)
def test_multiple_images_given_as_list_of_arrays(self):
images = [self.image, self.image]
aug = iaa.WithChannels(1, iaa.Add(10))
observed = aug.augment_images(images)
expected = self.image
expected[..., 1] += 10
expected = [expected, expected]
assert array_equal_lists(observed, expected)
def test_children_list_is_none(self):
aug = iaa.WithChannels(1, children=None)
observed = aug.augment_image(self.image)
expected = self.image
assert np.array_equal(observed, expected)
def test_channels_is_empty_list(self):
aug = iaa.WithChannels([], iaa.Add(10))
observed = aug.augment_image(self.image)
expected = self.image
assert np.array_equal(observed, expected)
def test_heatmap_augmentation_single_channel(self):
heatmap_arr = np.float32([
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
[1.0, 1.0, 1.0]
])
heatmap = HeatmapsOnImage(heatmap_arr, shape=(3, 3, 3))
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels(1, children=[affine])
heatmap_aug = aug.augment_heatmaps(heatmap)
assert heatmap_aug.shape == (3, 3, 3)
assert np.allclose(heatmap_aug.get_arr(), heatmap_arr)
def test_heatmap_augmentation_multiple_channels(self):
heatmap_arr = np.float32([
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
[1.0, 1.0, 1.0]
])
heatmap_arr_shifted = np.float32([
[0.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0]
])
heatmap = HeatmapsOnImage(heatmap_arr, shape=(3, 3, 3))
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels([0, 1, 2], children=[affine])
heatmap_aug = aug.augment_heatmaps(heatmap)
assert heatmap_aug.shape == (3, 3, 3)
assert np.allclose(heatmap_aug.get_arr(), heatmap_arr_shifted)
def test_segmentation_map_augmentation_single_channel(self):
segmap_arr = np.int32([
[0, 0, 1],
[0, 1, 1],
[1, 1, 1]
])
segmap = SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
aug = iaa.WithChannels(1, children=[iaa.Affine(translate_px={"x": 1})])
segmap_aug = aug.augment_segmentation_maps(segmap)
assert segmap_aug.shape == (3, 3, 3)
assert np.array_equal(segmap_aug.get_arr(), segmap_arr)
def test_segmentation_map_augmentation_multiple_channels(self):
segmap_arr = np.int32([
[0, 0, 1],
[0, 1, 1],
[1, 1, 1]
])
segmap_arr_shifted = np.int32([
[0, 0, 0],
[0, 0, 1],
[0, 1, 1]
])
segmap = SegmentationMapsOnImage(segmap_arr, shape=(3, 3, 3))
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels([0, 1, 2], children=[affine])
segmap_aug = aug.augment_segmentation_maps(segmap)
assert segmap_aug.shape == (3, 3, 3)
assert np.array_equal(segmap_aug.get_arr(), segmap_arr_shifted)
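# Shared helpers: run an Affine x-shift through WithChannels with different
# channel selections and compare the result against the expected (shifted or
# unchanged) coordinate-based augmentables.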
@classmethod
def _test_cbaoi_augmentation_single_channel(cls, cbaoi, augf_name):
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels(1, children=[affine])
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
@classmethod
def _test_cbaoi_augmentation_all_channels_via_list(cls, cbaoi, cbaoi_x,
augf_name):
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels([0, 1, 2], children=[affine])
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_x)
@classmethod
def _test_cbaoi_augmentation_subset_of_channels(cls, cbaoi, cbaoi_x,
augf_name):
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels([0, 1], children=[affine])
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi_x)
@classmethod
def _test_cbaoi_augmentation_with_empty_cbaoi(cls, cbaoi, augf_name):
affine = iaa.Affine(translate_px={"x": 1})
aug = iaa.WithChannels([0, 1], children=[affine])
observed = getattr(aug, augf_name)(cbaoi)
assert_cbaois_equal(observed, cbaoi)
def test_keypoint_augmentation_single_channel(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=2)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 6, 3))
self._test_cbaoi_augmentation_single_channel(kpsoi, "augment_keypoints")
def test_keypoint_augmentation_all_channels_via_list(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=2)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 6, 3))
kpsoi_x = kpsoi.shift(x=1)
self._test_cbaoi_augmentation_all_channels_via_list(
kpsoi, kpsoi_x, "augment_keypoints")
def test_keypoint_augmentation_subset_of_channels(self):
kps = [ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=2)]
kpsoi = ia.KeypointsOnImage(kps, shape=(5, 6, 3))
kpsoi_x = kpsoi.shift(x=1)
self._test_cbaoi_augmentation_subset_of_channels(
kpsoi, kpsoi_x, "augment_keypoints")
def test_keypoint_augmentation_with_empty_keypoints_instance(self):
kpsoi = ia.KeypointsOnImage([], shape=(5, 6, 3))
self._test_cbaoi_augmentation_with_empty_cbaoi(
kpsoi, "augment_keypoints")
def test_polygon_augmentation(self):
polygons = [ia.Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])]
psoi = ia.PolygonsOnImage(polygons, shape=(5, 6, 3))
self._test_cbaoi_augmentation_single_channel(psoi, "augment_polygons")
def test_polygon_augmentation_all_channels_via_list(self):
polygons = [ia.Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])]
psoi = ia.PolygonsOnImage(polygons, shape=(5, 6, 3))
psoi_x = psoi.shift(x=1)
self._test_cbaoi_augmentation_all_channels_via_list(
psoi, psoi_x, "augment_polygons")
def test_polygon_augmentation_subset_of_channels(self):
polygons = [ia.Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])]
psoi = ia.PolygonsOnImage(polygons, shape=(5, 6, 3))
psoi_x = psoi.shift(x=1)
self._test_cbaoi_augmentation_subset_of_channels(
psoi, psoi_x, "augment_polygons")
def test_polygon_augmentation_with_empty_polygons_instance(self):
psoi = ia.PolygonsOnImage([], shape=(5, 6, 3))
self._test_cbaoi_augmentation_with_empty_cbaoi(
psoi, "augment_polygons")
def test_line_string_augmentation(self):
lss = [ia.LineString([(0, 0), (3, 0), (3, 3), (0, 3)])]
lsoi = ia.LineStringsOnImage(lss, shape=(5, 6, 3))
self._test_cbaoi_augmentation_single_channel(
lsoi, "augment_line_strings")
def test_line_string_augmentation_all_channels_via_list(self):
lss = [ia.LineString([(0, 0), (3, 0), (3, 3), (0, 3)])]
lsoi = ia.LineStringsOnImage(lss, shape=(5, 6, 3))
lsoi_x = lsoi.shift(x=1)
self._test_cbaoi_augmentation_all_channels_via_list(
lsoi, lsoi_x, "augment_line_strings")
def test_line_string_augmentation_subset_of_channels(self):
lss = [ia.LineString([(0, 0), (3, 0), (3, 3), (0, 3)])]
lsoi = ia.LineStringsOnImage(lss, shape=(5, 6, 3))
lsoi_x = lsoi.shift(x=1)
self._test_cbaoi_augmentation_subset_of_channels(
lsoi, lsoi_x, "augment_line_strings")
def test_line_string_augmentation_with_empty_line_strings_instance(self):
lsoi = ia.LineStringsOnImage([], shape=(5, 6, 3))
self._test_cbaoi_augmentation_with_empty_cbaoi(
lsoi, "augment_line_strings")
def test_bounding_boxes_augmentation(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=1.0, y2=1.5)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(5, 6, 3))
self._test_cbaoi_augmentation_single_channel(
bbsoi, "augment_bounding_boxes")
def test_bounding_boxes_augmentation_all_channels_via_list(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=1.0, y2=1.5)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(5, 6, 3))
bbsoi_x = bbsoi.shift(x=1)
self._test_cbaoi_augmentation_all_channels_via_list(
bbsoi, bbsoi_x, "augment_bounding_boxes")
def test_bounding_boxes_augmentation_subset_of_channels(self):
bbs = [ia.BoundingBox(x1=0, y1=0, x2=1.0, y2=1.5)]
bbsoi = ia.BoundingBoxesOnImage(bbs, shape=(5, 6, 3))
bbsoi_x = bbsoi.shift(x=1)
self._test_cbaoi_augmentation_subset_of_channels(
bbsoi, bbsoi_x, "augment_bounding_boxes")
def test_bounding_boxes_augmentation_with_empty_bb_instance(self):
bbsoi = ia.BoundingBoxesOnImage([], shape=(5, 6, 3))
self._test_cbaoi_augmentation_with_empty_cbaoi(
bbsoi, "augment_bounding_boxes")
def test_invalid_datatype_for_channels_fails(self):
got_exception = False
try:
_ = iaa.WithChannels(False, iaa.Add(10))
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_invalid_datatype_for_children_fails(self):
got_exception = False
try:
_ = iaa.WithChannels(1, False)
except Exception as exc:
assert "Expected " in str(exc)
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.WithChannels([0], iaa.Add(1))
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.WithChannels([0], iaa.Add(1))
image_aug = aug(image=image)
assert np.all(image_aug[..., 0] == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_get_parameters(self):
aug = iaa.WithChannels([1], iaa.Add(10))
params = aug.get_parameters()
assert len(params) == 1
assert params[0] == [1]
def test_get_children_lists(self):
children = iaa.Sequential([iaa.Add(10)])
aug = iaa.WithChannels(1, children)
assert aug.get_children_lists() == [children]
def test_to_deterministic(self):
child = iaa.Identity()
aug = iaa.WithChannels(1, [child])
aug_det = aug.to_deterministic()
assert aug_det.deterministic
assert aug_det.random_state is not aug.random_state
assert aug_det.children[0].deterministic
def test___repr___and___str__(self):
children = iaa.Sequential([iaa.Identity()])
aug = iaa.WithChannels(1, children, name="WithChannelsTest")
expected = (
"WithChannels("
"channels=[1], "
"name=WithChannelsTest, "
"children=%s, "
"deterministic=False"
")" % (str(children),))
assert aug.__repr__() == expected
assert aug.__str__() == expected
def test_other_dtypes_via_noop__bool(self):
aug = iaa.WithChannels([0], iaa.Identity())
image = np.zeros((3, 3, 2), dtype=bool)
image[0, 0, :] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug == image)
def test_other_dtypes_via_noop__uint_int(self):
aug = iaa.WithChannels([0], iaa.Identity())
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, :] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, image)
def test_other_dtypes_via_noop__float(self):
aug = iaa.WithChannels([0], iaa.Identity())
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, :] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == image)
def test_other_dtypes_via_flips__bool(self):
aug = iaa.WithChannels([0], iaa.Fliplr(1.0))
image = np.zeros((3, 3, 2), dtype=bool)
image[0, 0, :] = True
expected = np.zeros((3, 3, 2), dtype=bool)
expected[0, 2, 0] = True
expected[0, 0, 1] = True
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
assert np.all(image_aug == expected)
def test_other_dtypes_via_flips__uint_int(self):
aug = iaa.WithChannels([0], iaa.Fliplr(1.0))
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, :] = value
expected = np.zeros((3, 3, 2), dtype=dtype)
expected[0, 2, 0] = value
expected[0, 0, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.array_equal(image_aug, expected)
def test_other_dtypes_via_flips__float(self):
aug = iaa.WithChannels([0], iaa.Fliplr(1.0))
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, :] = value
expected = np.zeros((3, 3, 2), dtype=dtype)
expected[0, 2, 0] = value
expected[0, 0, 1] = value
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
assert np.all(image_aug == expected)
def test_pickleable(self):
aug = iaa.WithChannels([0], iaa.Add((1, 10), seed=2),
seed=1)
runtest_pickleable_uint8_img(aug, iterations=5)
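# Tests for iaa.ChannelShuffle: with probability p the channel order of an
# image is shuffled (optionally restricted to a subset of channels), while
# non-image augmentables must remain unchanged.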
class TestChannelShuffle(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
aug = iaa.ChannelShuffle(p=0.9, channels=[0, 2])
assert isinstance(aug.p, iap.Binomial)
assert isinstance(aug.p.p, iap.Deterministic)
assert np.allclose(aug.p.p.value, 0.9)
assert aug.channels == [0, 2]
def test_p_is_1(self):
aug = iaa.ChannelShuffle(p=1.0)
img = np.uint8([0, 1]).reshape((1, 1, 2))
expected = [
np.uint8([0, 1]).reshape((1, 1, 2)),
np.uint8([1, 0]).reshape((1, 1, 2))
]
seen = [False, False]
for _ in sm.xrange(100):
img_aug = aug.augment_image(img)
if np.array_equal(img_aug, expected[0]):
seen[0] = True
elif np.array_equal(img_aug, expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_p_is_0(self):
aug = iaa.ChannelShuffle(p=0)
img = np.uint8([0, 1]).reshape((1, 1, 2))
for _ in sm.xrange(20):
img_aug = aug.augment_image(img)
assert np.array_equal(img_aug, img)
def test_p_is_1_and_channels_is_limited_subset(self):
aug = iaa.ChannelShuffle(p=1.0, channels=[0, 2])
img = np.uint8([0, 1, 2]).reshape((1, 1, 3))
expected = [
np.uint8([0, 1, 2]).reshape((1, 1, 3)),
np.uint8([2, 1, 0]).reshape((1, 1, 3))
]
seen = [False, False]
for _ in sm.xrange(100):
img_aug = aug.augment_image(img)
if np.array_equal(img_aug, expected[0]):
seen[0] = True
elif np.array_equal(img_aug, expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_get_parameters(self):
aug = iaa.ChannelShuffle(p=1.0, channels=[0, 2])
assert aug.get_parameters()[0] == aug.p
assert aug.get_parameters()[1] == aug.channels
def test_heatmaps_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
hm = ia.HeatmapsOnImage(np.float32([[0, 0.5, 1.0]]), shape=(4, 4, 3))
hm_aug = aug.augment_heatmaps([hm])[0]
assert hm_aug.shape == (4, 4, 3)
assert hm_aug.arr_0to1.shape == (1, 3, 1)
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_segmentation_maps_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
segmap = SegmentationMapsOnImage(np.int32([[0, 1, 2]]), shape=(4, 4, 3))
segmap_aug = aug.augment_segmentation_maps([segmap])[0]
assert segmap_aug.shape == (4, 4, 3)
assert segmap_aug.arr.shape == (1, 3, 1)
assert np.array_equal(segmap.arr, segmap_aug.arr)
def test_keypoints_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
kpsoi = ia.KeypointsOnImage([
ia.Keypoint(x=3, y=1), ia.Keypoint(x=2, y=4)
], shape=(10, 10, 3))
kpsoi_aug = aug.augment_keypoints([kpsoi])[0]
assert_cbaois_equal(kpsoi_aug, kpsoi)
def test_polygons_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
psoi = ia.PolygonsOnImage([
ia.Polygon([(0, 0), (5, 0), (5, 5)])
], shape=(10, 10, 3))
psoi_aug = aug.augment_polygons(psoi)
assert_cbaois_equal(psoi_aug, psoi)
def test_line_strings_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
lsoi = ia.LineStringsOnImage([
ia.LineString([(0, 0), (5, 0), (5, 5)])
], shape=(10, 10, 3))
lsoi_aug = aug.augment_line_strings(lsoi)
assert_cbaois_equal(lsoi_aug, lsoi)
def test_bounding_boxes_must_not_change(self):
aug = iaa.ChannelShuffle(p=1.0)
bbsoi = ia.BoundingBoxesOnImage([
ia.BoundingBox(x1=0, y1=0, x2=1.0, y2=1.5)
], shape=(10, 10, 3))
bbsoi_aug = aug.augment_bounding_boxes(bbsoi)
assert_cbaois_equal(bbsoi_aug, bbsoi)
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ChannelShuffle(1.0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ChannelShuffle(1.0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == shape
def test_other_dtypes_bool(self):
aug = iaa.ChannelShuffle(p=0.5)
image = np.zeros((3, 3, 2), dtype=bool)
image[0, 0, 0] = True
expected = [np.zeros((3, 3, 2), dtype=bool) for _ in sm.xrange(2)]
expected[0][0, 0, 0] = True
expected[1][0, 0, 1] = True
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == image.dtype.name
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_other_dtypes_uint_int(self):
aug = iaa.ChannelShuffle(p=0.5)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int32", "int64"]
for dtype in dtypes:
with self.subTest(dtype=dtype):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dtype)
value = max_value
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, 0] = value
expected = [np.zeros((3, 3, 2), dtype=dtype)
for _ in sm.xrange(2)]
expected[0][0, 0, 0] = value
expected[1][0, 0, 1] = value
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_other_dtypes_float(self):
aug = iaa.ChannelShuffle(p=0.5)
dtypes = ["float16", "float32", "float64", "float128"]
values = [5000, 1000 ** 2, 1000 ** 3, 1000 ** 4]
for dtype, value in zip(dtypes, values):
with self.subTest(dtype=dtype):
image = np.zeros((3, 3, 2), dtype=dtype)
image[0, 0, 0] = value
expected = [np.zeros((3, 3, 2), dtype=dtype)
for _ in sm.xrange(2)]
expected[0][0, 0, 0] = value
expected[1][0, 0, 1] = value
seen = [False, False]
for _ in sm.xrange(100):
image_aug = aug.augment_image(image)
assert image_aug.dtype.name == dtype
if np.all(image_aug == expected[0]):
seen[0] = True
elif np.all(image_aug == expected[1]):
seen[1] = True
else:
assert False
if np.all(seen):
break
assert np.all(seen)
def test_pickleable(self):
aug = iaa.ChannelShuffle(0.5, seed=1)
runtest_pickleable_uint8_img(aug, iterations=5, shape=(2, 2, 10))
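# Tests for iaa.RemoveCBAsByOutOfImageFraction: coordinate-based augmentables
# whose out-of-image fraction reaches the configured threshold are removed from
# the batch; image data is passed through unchanged.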
class TestRemoveCBAsByOutOfImageFraction(unittest.TestCase):
def setUp(self):
reseed()
def test___init__(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
assert np.isclose(aug.fraction, 0.51)
def test_no_cbas_in_batch(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128",
"bool"]
for dt in dtypes:
arr = np.ones((5, 10, 3), dtype=dt)
image_aug = aug(image=arr)
assert image_aug.dtype.name == dt
assert image_aug.shape == (5, 10, 3)
if arr.dtype.kind == "f":
assert np.allclose(image_aug, 1.0)
else:
assert np.all(image_aug == 1)
def test_keypoints(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
item1 = ia.Keypoint(x=5, y=1)
item2 = ia.Keypoint(x=15, y=1)
cbaoi = ia.KeypointsOnImage([item1, item2], shape=(10, 10, 3))
cbaoi_aug = aug(keypoints=cbaoi)
assert len(cbaoi_aug.items) == 1
for item_obs, item_exp in zip(cbaoi_aug.items, [item1]):
assert item_obs.coords_almost_equals(item_exp)
def test_bounding_boxes(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
item1 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=9)
item2 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=15)
item3 = ia.BoundingBox(y1=1, x1=15, y2=6, x2=25)
cbaoi = ia.BoundingBoxesOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(bounding_boxes=cbaoi)
assert len(cbaoi_aug.items) == 2
for item_obs, item_exp in zip(cbaoi_aug.items, [item1, item2]):
assert item_obs.coords_almost_equals(item_exp)
def test_polygons(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
item1 = ia.Polygon([(5, 1), (9, 1), (9, 2), (5, 2)])
item2 = ia.Polygon([(5, 1), (15, 1), (15, 2), (5, 2)])
item3 = ia.Polygon([(15, 1), (25, 1), (25, 2), (15, 2)])
cbaoi = ia.PolygonsOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(polygons=cbaoi)
assert len(cbaoi_aug.items) == 2
for item_obs, item_exp in zip(cbaoi_aug.items, [item1, item2]):
assert item_obs.coords_almost_equals(item_exp)
def test_line_strings(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
item1 = ia.LineString([(5, 1), (9, 1)])
item2 = ia.LineString([(5, 1), (15, 1)])
item3 = ia.LineString([(15, 1), (25, 1)])
cbaoi = ia.LineStringsOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(line_strings=cbaoi)
assert len(cbaoi_aug.items) == 2
for item_obs, item_exp in zip(cbaoi_aug.items, [item1, item2]):
assert item_obs.coords_almost_equals(item_exp)
def test_get_parameters(self):
aug = iaa.RemoveCBAsByOutOfImageFraction(0.51)
params = aug.get_parameters()
assert len(params) == 1
assert np.isclose(params[0], 0.51)
def test_pickleable(self):
item1 = ia.Keypoint(x=5, y=1)
item2 = ia.Keypoint(x=15, y=1)
cbaoi = ia.KeypointsOnImage([item1, item2], shape=(10, 10, 3))
augmenter = iaa.RemoveCBAsByOutOfImageFraction(0.51)
augmenter_pkl = pickle.loads(pickle.dumps(augmenter, protocol=-1))
for _ in np.arange(3):
cbaoi_aug = augmenter(keypoints=cbaoi)
cbaoi_aug_pkl = augmenter_pkl(keypoints=cbaoi)
assert np.allclose(cbaoi_aug.to_xy_array(), cbaoi_aug_pkl.to_xy_array())
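# Tests for iaa.ClipCBAsToImagePlanes: coordinate-based augmentables are
# clipped to the image plane, and items that fall completely outside the image
# are dropped.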
class TestClipCBAsToImagePlanes(unittest.TestCase):
def setUp(self):
reseed()
def test_no_cbas_in_batch(self):
aug = iaa.ClipCBAsToImagePlanes()
dtypes = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64",
"float16", "float32", "float64", "float128",
"bool"]
for dt in dtypes:
arr = np.ones((5, 10, 3), dtype=dt)
image_aug = aug(image=arr)
assert image_aug.dtype.name == dt
assert image_aug.shape == (5, 10, 3)
if arr.dtype.kind == "f":
assert np.allclose(image_aug, 1.0)
else:
assert np.all(image_aug == 1)
def test_keypoints(self):
aug = iaa.ClipCBAsToImagePlanes()
item1 = ia.Keypoint(x=5, y=1)
item2 = ia.Keypoint(x=15, y=1)
cbaoi = ia.KeypointsOnImage([item1, item2], shape=(10, 10, 3))
cbaoi_aug = aug(keypoints=cbaoi)
assert len(cbaoi_aug.items) == 1
for item_obs, item_exp in zip(cbaoi_aug.items, [item1]):
assert item_obs.coords_almost_equals(item_exp)
def test_bounding_boxes(self):
aug = iaa.ClipCBAsToImagePlanes()
item1 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=9)
item2 = ia.BoundingBox(y1=1, x1=5, y2=6, x2=15)
item3 = ia.BoundingBox(y1=1, x1=15, y2=6, x2=25)
cbaoi = ia.BoundingBoxesOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(bounding_boxes=cbaoi)
expected = [
ia.BoundingBox(y1=1, x1=5, y2=6, x2=9),
ia.BoundingBox(y1=1, x1=5, y2=6, x2=10)
]
assert len(cbaoi_aug.items) == len(expected)
for item_obs, item_exp in zip(cbaoi_aug.items, expected):
assert item_obs.coords_almost_equals(item_exp)
def test_polygons(self):
aug = iaa.ClipCBAsToImagePlanes()
item1 = ia.Polygon([(5, 1), (9, 1), (9, 2), (5, 2)])
item2 = ia.Polygon([(5, 1), (15, 1), (15, 2), (5, 2)])
item3 = ia.Polygon([(15, 1), (25, 1), (25, 2), (15, 2)])
cbaoi = ia.PolygonsOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(polygons=cbaoi)
expected = [
ia.Polygon([(5, 1), (9, 1), (9, 2), (5, 2)]),
ia.Polygon([(5, 1), (10, 1), (10, 2), (5, 2)])
]
assert len(cbaoi_aug.items) == len(expected)
for item_obs, item_exp in zip(cbaoi_aug.items, expected):
assert item_obs.coords_almost_equals(item_exp)
def test_line_strings(self):
aug = iaa.ClipCBAsToImagePlanes()
item1 = ia.LineString([(5, 1), (9, 1)])
item2 = ia.LineString([(5, 1), (15, 1)])
item3 = ia.LineString([(15, 1), (25, 1)])
cbaoi = ia.LineStringsOnImage([item1, item2, item3],
shape=(10, 10, 3))
cbaoi_aug = aug(line_strings=cbaoi)
expected = [
ia.LineString([(5, 1), (9, 1)]),
ia.LineString([(5, 1), (10, 1)])
]
assert len(cbaoi_aug.items) == len(expected)
for item_obs, item_exp in zip(cbaoi_aug.items, expected):
assert item_obs.coords_almost_equals(item_exp, max_distance=1e-2)
def test_get_parameters(self):
aug = iaa.ClipCBAsToImagePlanes()
params = aug.get_parameters()
assert len(params) == 0
def test_pickleable(self):
item1 = ia.Keypoint(x=5, y=1)
item2 = ia.Keypoint(x=15, y=1)
cbaoi = ia.KeypointsOnImage([item1, item2], shape=(10, 10, 3))
augmenter = iaa.ClipCBAsToImagePlanes()
augmenter_pkl = pickle.loads(pickle.dumps(augmenter, protocol=-1))
for _ in np.arange(3):
cbaoi_aug = augmenter(keypoints=cbaoi)
cbaoi_aug_pkl = augmenter_pkl(keypoints=cbaoi)
assert np.allclose(cbaoi_aug.to_xy_array(), cbaoi_aug_pkl.to_xy_array())
``` |
{
"source": "1-800-BAD-CODE/MorseCodeToolkit",
"score": 2
} |
#### File: examples/morse_to_text/convert_checkpoint.py
```python
import argparse
import logging
import os
from typing import Dict
from omegaconf import ListConfig, DictConfig
import torch
from morsecodetoolkit.models.ctc_models import EncDecCTCModel
def get_args() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Converts a .ckpt or .nemo file to a half-precision .nemo file to minimize file size.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("model")
parser.add_argument("output_file")
args: argparse.Namespace = parser.parse_args()
return args
def _remove_local_paths(cfg: Dict) -> None:
"""Removes any file names or directory paths from a configuration, presumably for releasing models.
"""
for k, v in cfg.items():
if isinstance(v, (dict, DictConfig)):
_remove_local_paths(v)
elif isinstance(v, str):
if "/" in v:
cfg[k] = None
elif isinstance(v, (list, ListConfig)):
for i, entry in enumerate(v):
if isinstance(entry, str) and "/" in entry:
cfg[k][i] = None
def main():
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(levelname)s : %(message)s',
datefmt='%H:%M:%S'
)
args = get_args()
logging.info(f"Restoring from {args.model}")
m: EncDecCTCModel
if args.model.endswith(".nemo"):
m = EncDecCTCModel.restore_from(args.model, map_location=torch.device("cpu"))
else:
m = EncDecCTCModel.load_from_checkpoint(args.model, map_location="cpu")
m.eval().half()
_remove_local_paths(m.cfg)
logging.info(f"Saving model to {args.output_file}")
output_dir = os.path.dirname(args.output_file)
if output_dir:
os.makedirs(output_dir, exist_ok=True)
m.save_to(args.output_file)
if __name__ == "__main__":
main()
```
#### File: examples/morse_to_text/print_cfg.py
```python
import argparse
import logging
from omegaconf import OmegaConf, DictConfig
from morsecodetoolkit.models.ctc_models import EncDecCTCModel
def get_args() -> argparse.Namespace:
parser: argparse.ArgumentParser = argparse.ArgumentParser(
description="Prints a model's configuration to stdout (for quick inspection).",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("model")
args: argparse.Namespace = parser.parse_args()
return args
def main():
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(levelname)s : %(message)s',
datefmt='%H:%M:%S'
)
args = get_args()
logging.info(f"Restoring from {args.model}")
cfg: DictConfig
if args.model.endswith(".nemo"):
cfg = EncDecCTCModel.restore_from(args.model, return_config=True)
else:
m = EncDecCTCModel.load_from_checkpoint(args.model, map_location="cpu")
cfg = m.cfg
logging.info("Model config:")
logging.info(OmegaConf.to_yaml(cfg))
if __name__ == "__main__":
main()
```
#### File: examples/synthesize_dataset/synthesize.py
```python
import logging
from omegaconf import OmegaConf
from nemo.core.config import hydra_runner
from morsecodetoolkit.data import SyntheticMorseDataset
@hydra_runner(config_path="conf", config_name="english")
def main(cfg):
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(levelname)s : %(message)s',
datefmt='%H:%M:%S'
)
logging.info(f"Hydra config: {OmegaConf.to_yaml(cfg)}")
dataset: SyntheticMorseDataset = SyntheticMorseDataset(**cfg.dataset)
dataset.synthesize(cfg.output_dir)
if __name__ == '__main__':
main()
```
#### File: morsecodetoolkit/bin/text_to_morse.py
```python
import logging
import argparse
import random
from typing import List
import soundfile
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from morsecodetoolkit.util import functional
from morsecodetoolkit.alphabet import MorseAlphabet, Symbol
def get_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Generates one morse audio signal and saves it to a .wav file. Intended to be a tool for quick "
"tests and usage demonstration. For synthesizing large corpora see `examples/synthesize_dataset/`."
)
parser.add_argument("text", help="Sentence to generate morse signal from.")
parser.add_argument("output_filepath")
alphabet_opts = parser.add_argument_group("Alphabet-related options")
alphabet_opts.add_argument(
"--alphabet-name", default="international",
help="Load build-in alphabet from resource based on this name. If --alphabet-yaml is given, it has priority "
"over this option and this option is ignored."
)
alphabet_opts.add_argument(
"--alphabet-yaml", default=None,
help="If set, ignore --alphabet-name and instead load alphabet from this .yaml file."
)
alphabet_opts.add_argument("--prosign", action="store_true", help="If set, treat the input text as a prosign.")
data_opts = parser.add_argument_group("Data options")
data_opts.add_argument("--background-audio", help="If given, use this audio file as background noise.")
data_opts.add_argument("--sample-rate", type=int, default=16000, help="Sample rate to generate at.")
data_opts.add_argument("--tone-freq", type=float, default=500, help="Frequency to create the tones.")
data_opts.add_argument("--snr-db", type=float, default=10, help="SNR of morse/background noise, in dB.")
data_opts.add_argument("--gain-db", type=float, default=-10, help="Gain of morse signal, in dB.")
data_opts.add_argument("--pad-left", type=int, default=500, help="Left-side padding, in ms.")
data_opts.add_argument("--pad-right", type=int, default=500, help="Right-side padding, in ms.")
data_opts.add_argument("--rng-seed", type=int, default=1111, help="Seed for RNG.")
data_opts.add_argument("--window-name", default="hann", help="Window function to apply to tones.")
data_opts.add_argument("--window-rise-time-ms", type=int, default=12, help="Window rise time, in ms.")
data_opts.add_argument(
"--dit-duration", type=int, default=60,
help="Mean duration of a dit, and the basic unit of length for all other durations."
)
data_opts.add_argument(
"--duration-sigma", type=float, default=5,
help="The standard deviation of duration lengths, such that when randomly choosing a duration with mean mu, "
"choose N(mu, sigma). This will be scaled up with the duration; e.g., for a DASH this value will be "
"multiplied by 3 (as the DASH duration is 3x the DIT duration)."
)
args = parser.parse_args()
return args
def main():
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s] %(levelname)s : %(message)s',
datefmt='%H:%M:%S'
)
args = get_args()
# We'll pass this RNG to the functions that use randomness, for reproducible results.
rng: random.Random = random.Random(args.rng_seed)
# Resolve alphabet
alphabet: MorseAlphabet = MorseAlphabet(name=args.alphabet_name, yaml_filepath=args.alphabet_yaml)
# Convert text to dit/dash sequence
symbols: List[Symbol] = alphabet.text_to_symbols(args.text, clean=True, is_prosign=args.prosign)
# Generate the clean (morse-only) audio signal
morse_signal: AudioSegment = functional.symbols_to_signal(
symbols=symbols,
sample_rate=args.sample_rate,
gain_db=args.gain_db,
tone_frequency_hz=args.tone_freq,
dit_duration_ms=args.dit_duration,
duration_sigma=args.duration_sigma,
pad_left_ms=args.pad_left,
pad_right_ms=args.pad_right,
window_name=args.window_name,
window_rise_time_ms=args.window_rise_time_ms,
rng=rng
)
# Maybe add some noise to the morse-only audio signal
if args.background_audio is not None:
# Load background noise
noise_signal: AudioSegment = AudioSegment.from_file(
audio_file=args.background_audio,
target_sr=args.sample_rate
)
# Mix the two together
morse_signal = functional.mix_background_signal(
morse_signal=morse_signal,
noise_signal=noise_signal,
snr_db=args.snr_db
)
# Save final audio file
soundfile.write(
file=args.output_filepath,
data=morse_signal.samples,
samplerate=morse_signal.sample_rate,
format="WAV",
subtype="PCM_16"
)
if __name__ == "__main__":
main()
``` |
{
"source": "1801573781/wind",
"score": 3
} |
#### File: code/activation/last_hop_activation.py
```python
import math
import numpy as np
from activation.dichotomy import dichotomy_revise
from gl.handle_array import sum_arr, handle_arr_ex
class LastHopActivation:
"""
功能:最后一跳激活函数, 一个 base class \n
说明:神经网络的最后一跳,也需要激活函数,比如 SoftMax \n
"""
def active_array(self, nn_y):
"""
训练时,最后一跳激活
:param nn_y: 神经网络训练的输出,是一个n维数组,或者是一个数值
:return: 最后一跳激活的结果(是一个n维数组,或者是一个数值)
"""
# 默认实现,将神经网络的输出,复制一份输出
last_hop_y = np.asarray(nn_y)
return last_hop_y
''''''
def predict_revise(self, lha_y, revise_strong=False):
"""
预测时,最后一跳激活之后,再修正
:param lha_y: 最后一跳激活之后的输出
:param revise_strong: 强修正 flag
:return: 最后一跳激活之后,再修正的结果
"""
# 默认实现,将 lha_y 复制一份输出
lhr_y = np.asarray(lha_y)
return lhr_y
''''''
def derivative(self, last_hop_y, index):
"""
最后一跳激活函数的导数
:param last_hop_y: 最后一跳的输出,是一个n维数组,或者是一个数值
:param index: 最后一跳的输出的索引,是一个向量
:return: 最后一跳激活函数的导数(是一个n维数组,或者是一个数值)
"""
# 默认实现
return 1
''''''
def derivative_array(self, lha_y):
"""
最后一跳激活函数的导数
:param lha_y: 最后一跳激活函数的输出
:return: 最后一跳激活函数的导数
"""
# 默认实现,返回“1”
lhd_y = np.ones(lha_y.shape)
return lhd_y
''''''
class DichotomyLHA(LastHopActivation):
"""
功能:二分类最后一跳激活函数\n
"""
def predict_revise(self, lha_y, revise_strong=False):
"""
预测时,最后一跳激活之后,再修正
:param lha_y: 最后一跳激活之后的输出
:param revise_strong: 强修正 flag
:return: 最后一跳激活之后,再修正的结果
"""
# 将神经网络的输出,复制一份输出
lhr_y = np.zeros(lha_y.shape)
# 预测时,最后一跳需要修正,修正为二分类中的某一类
arr_list = [lha_y]
handle_arr_ex(arr_list, lhr_y, dichotomy_revise)
return lhr_y
''''''
def derivative(self, last_hop_y, index):
"""
预测时,最后一跳激活函数的导数 \n
对于 DichotomyLHA 而言,训练时,其最后一跳并没有做任何激活处理,所以导数为1 \n
:param last_hop_y: 最后一跳的输出,是一个n维数组,或者是一个数值
:param index: 最后一跳的输出的索引,是一个向量
:return: 最后一跳激活函数的导数(是一个n维数组,或者是一个数值)
"""
return 1
''''''
class SoftMaxLHA(LastHopActivation):
"""
功能:SoftMax 最后一跳激活函数\n
"""
''''''
def active_array(self, nn_y):
"""
功能:训练时,最后一跳激活\n
参数:\n
nn_y:神经网络训练的输出,是一个n维数组,或者是一个数值\n
返回值:最后一跳激活的结果(是一个n维数组,或者是一个数值)\n
"""
# 训练时,最后一跳做 soft max 处理
return SoftMaxLHA._soft_max(nn_y)
''''''
@staticmethod
def _soft_max(arr):
"""
功能:将 arr 的每个元素,求解 soft max\n
参数:\n
arr:多维数组\n
返回值:soft max 计算结果(与 arr 维度相同的数组)\n
"""
# 1. exp_arr = exp(arr)
exp_arr = np.zeros(arr.shape)
arr_list = [arr]
handle_arr_ex(arr_list, exp_arr, SoftMaxLHA._exp)
# 2. 求 arr 各元素之和
s = [0]
sum_arr(exp_arr, s)
# 3. 求解概率
last_hop_arr = np.zeros(arr.shape)
arr_list = [exp_arr]
handle_arr_ex(arr_list, last_hop_arr, SoftMaxLHA._probability, s[0])
return last_hop_arr
''''''
@staticmethod
def _exp(*args):
"""
功能:求解 e^x
参数:\n
args:args[0][0] 为 x
返回值:e^x
"""
x = args[0][0]
return math.exp(x)
@staticmethod
def _probability(*args):
"""
功能:求解概率,P = a / s
参数:\n
args[0][0]:a,数组 arr 中的某一个元素\n
args[0][1]:s,数组所有元素之和
返回值:a / s\n
"""
a = args[0][0]
s = args[0][1]
return a / s
''''''
def predict_revise(self, lha_y, revise_strong=False):
"""
预测时,最后一跳激活之后,再修正
:param lha_y: 最后一跳激活之后的输出
:param revise_strong: 强修正 flag
:return: 最后一跳激活之后,再修正的结果
"""
if revise_strong:
return SoftMaxLHA._strong_revise(lha_y)
else:
return SoftMaxLHA._weak_revise(lha_y)
''''''
@staticmethod
def _strong_revise(lha_y):
"""
强修正:最大值修正为 1,其余值修正为 0
:param lha_y:
:return: 修正后的值
"""
# 不搞那么复杂了,因为是 softmax,所以可以肯定 lha_y 是一个 [row, 1] 矩阵(只有1列)
# 将神经网络的输出,复制一份输出
lhr_y = np.zeros(lha_y.shape)
# 获取 lha 最大值的索引
max_index = SoftMaxLHA._get_max_index(lha_y)
# 将 lhr_y 该索引位置赋值为1
lhr_y[max_index][0] = 1
return lhr_y
''''''
@staticmethod
def _get_max_index(lha_y):
"""
获取 lha_y 最大值的索引
:param lha_y: [row, 1] 矩阵
:return: lha_y 最大值的索引
"""
max_value = 0
max_index = 0
row = lha_y.shape[0]
# 可以肯定,lha_y 每个值都大于0
for r in range(0, row):
if lha_y[r][0] > max_value:
max_value = lha_y[r][0]
max_index = r
return max_index
''''''
@staticmethod
def _weak_revise(lha_y):
"""
弱修正:只有大于一定的值,才修正为1,只有小于一定的值,才修正为0
:param lha_y: [row, 1] 矩阵
:return: 修正后的值
"""
# 不搞那么复杂了,因为是 softmax,所以可以肯定 lha_y 是一个 [row, 1] 矩阵(只有1列)
# 将神经网络的输出,复制一份输出
lhr_y = np.zeros(lha_y.shape)
row = lha_y.shape[0]
max_value = 0.9
min_value = 0.1
for r in range(0, row):
if lha_y[r][0] >= max_value:
lhr_y[r][0] = 1
elif lha_y[r][0] <= min_value:
lhr_y[r][0] = 0
else:
lhr_y[r][0] = lha_y[r][0]
return lhr_y
''''''
def derivative(self, last_hop_y, index):
"""
预测时,最后一跳激活函数的导数 \n
对于 SoftMaxLHA 而言,训练时,其最后一跳虽然做了 SoftMax 处理,但是为了计算效率,将其导数的计算合并到交叉熵那里了,所以导数也为1 \n
:param last_hop_y: 最后一跳的输出,是一个n维数组,或者是一个数值
:param index: 最后一跳的输出的索引,是一个向量
:return: 最后一跳激活函数的导数(是一个n维数组,或者是一个数值)
"""
return 1
```
#### File: code/bp/bp_nn_ex.py
```python
import numpy as np
from fnn.fnn_ex import FnnEx
from gl.matrix_list import matrix_2_list, list_2_matrix
class BPFnnEx(FnnEx):
"""
分组训练的 BP 神经网络
"""
''''''
def _calc_train_para_delta(self, nn_y_list, sx, sy):
"""
计算神经网络训练参数的 delta
:param nn_y_list: 神经网络每一层的输出
:param sx: 训练样本(输入)
:param sy: 训练样本(输出)
:return: NULL
"""
# 1. 通过 bp 算法,计算 ksi
ksi_list = self._bp(nn_y_list, sy)
# 2. 通过 ksi,计算 delta w, delta b
self._calc_delta_wb(ksi_list, sx, nn_y_list)
# 子类需要 ksi_list
return ksi_list
''''''
def _modify_train_para(self):
"""
根据训练参数的 delta,修正训练参数
:return: NULL
"""
# 修正每一层的 w,b 参数
for layer in range(0, self._layer_count):
self._w_layer[layer] -= self._rate * self._delta_w_layer[layer]
self._b_layer[layer] -= self._rate * self._delta_b_layer[layer]
''''''
def _bp(self, nn_y_list, sy):
"""
后向传播,计算 ksi_list \n
1、ksi(代表希腊字母,音:科赛),是一个向量,每层都有,代表目标函数 E 对每一层中间输出的偏导 \n
2、ksi_list 记录每一层的 ksi \n
:param nn_y_list: 神经网路计算的每一层结果
:param sy: 训练样本的输出
:return: ksi_list
"""
# 1. 初始化 ksi_list
ksi_list = [0] * self._layer_count
# 2. 计算最后一层 ksi
ksi_last = self._calc_last_ksi(nn_y_list, sy)
ksi_list[self._layer_count - 1] = ksi_last
# 3. 反向传播,计算:倒数第2层 ~ 第1层的 ksi
self._bp_ksi(nn_y_list, ksi_list)
# return 计算结果
return ksi_list
''''''
def _calc_last_ksi(self, nn_y_list, sy):
"""
计算最后一层 ksi
:param nn_y_list: 神经网路计算的每一层结果
:param sy: 训练样本的输出
:return: 最后一层 ksi
"""
# 1. 计算损失函数的偏导
last_hop_y = nn_y_list[self._layer_count]
loss_dy = self._loss.derivative_array(last_hop_y, sy)
# 2. 计算最后一层 ksi
nn_y_last = nn_y_list[self._layer_count - 1]
row_last = len(nn_y_last)
ksi_last = list()
for i in range(0, row_last):
# 计算ksi_last 的每个元素
ksi_item = loss_dy[i][0] * self._last_hop_activation.derivative(last_hop_y, i) \
* self._activation.derivative(nn_y_last[i][0])
ksi_last.append(ksi_item)
ksi_last = list_2_matrix(ksi_last)
return ksi_last
''''''
def _bp_ksi(self, nn_y_list, ksi_list):
"""
反向传播,计算:倒数第2层 ~ 第1层的 ksi
:param nn_y_list: 神经网路计算的每一层结果
:param ksi_list: 存储每一层的 ksi
:return: NULL
"""
# 反向传播
for layer in range(self._layer_count - 2, -1, -1):
# 1. 求解当前层激活函数的导数
# 1.1 当前层神经网络的计算结果
nn_y_cur = nn_y_list[layer]
# 1.2 求导
dy_cur_activation = self._activation.derivative_array(nn_y_cur)
# 1.3 将求导结果转化为对角矩阵
diag_dy = np.diag(matrix_2_list(dy_cur_activation))
# 2. 下一层的 w 的转置
w_next_T = (self._w_layer[layer + 1]).T
# 3. 下一层的 ksi
ksi_next = ksi_list[layer + 1]
# 4. 计算当前层的 ksi: ksi_cur = diag_y * w_next_T, ksi_next
ksi_cur = np.matmul(w_next_T, ksi_next)
ksi_cur = np.matmul(diag_dy, ksi_cur)
# 5. 将本层计算出的 ksi 加入到 ksi_list
ksi_list[layer] = ksi_cur
''''''
def _calc_delta_wb(self, ksi_list, sx, nn_y_list):
"""
计算每一层的 delta_w, delta_b
:param ksi_list: 每一层的 ksi 列表
:param sx: 训练样本(输入)
:param nn_y_list: 神经网络每一层的输出
:return:
"""
# 因为已经通过 bp,计算出每一层的 ksi,所以,计算 delta_w, delta_b 时,就不必使用 bp 算法了,正向计算即可
for layer in range(0, self._layer_count):
# 该层的 ksi
ksi = ksi_list[layer]
if 0 == layer:
v = sx
else:
v = nn_y_list[layer - 1]
# 非常关键的计算公式
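# Added shape note (not in the original source): ksi has shape (n_layer, 1) and the layer
# input v has shape (n_prev, 1), so np.matmul(ksi, v.T) is (n_layer, n_prev), exactly the
# shape of this layer's w; delta_b simply accumulates ksi itself.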
self._delta_w_layer[layer] += np.matmul(ksi, v.T)
self._delta_b_layer[layer] += ksi
```
#### File: code/bp/test_softmax_bp_nn.py
```python
import os
import numpy as np
from activation.last_hop_activation import LastHopActivation, SoftMaxLHA
from activation.normal_activation import Sigmoid, ReLU
from loss.loss import MSELoss, CrossEntropyLoss
from bp.bp_nn_recitation import Recitation
from bp import bp_nn, bp_nn_ex
from sample.image_softmax_sample import ImageSoftMaxSample
from sample.one_poem_sample import OnePoemSample
def test_softmax():
# 1. 构建神经网络对象
# 激活函数
# activation = Sigmoid()
activation = ReLU(20)
# 最后一跳激活函数
last_hop_activation = SoftMaxLHA()
# 损失函数
loss = CrossEntropyLoss()
# 神经网络
# nn = bp_nn.BPFNN(activation, last_hop_activation, loss)
nn = bp_nn_ex.BPFnnEx(activation, last_hop_activation, loss)
# 2. 构建训练样本
# 训练样本对象
sample = ImageSoftMaxSample()
# 训练样本输入,向量维度
sx_dim = 400 # 20 * 20 的图像, 400维向量
# 训练样本输出,向量维度
sy_dim = 10 # one-hot, 10维向量
# 创建训练样本,输入/输出
train_image_root_path = "./../picture/number_softmax_train"
train_image_root_path = os.path.abspath(train_image_root_path)
sample.create_sample(train_image_root_path)
# sample.create_sample_ex(100)
# train_sx_list = sample.get_sx_list()
# train_sy_list = sample.get_sy_list()
train_sx_group = sample.get_sx_group()
train_sy_group = sample.get_sy_group()
# 3. 训练
# 每一层网络的神经元个数
neuron_count_list = [10, 10]
# 最大循环训练次数
loop_max = 30
# 学习效率
rate = 0.1
# 训练
# nn.train(train_sx_list, train_sy_list, loop_max, neuron_count_list, rate)
nn.train(train_sx_group, train_sy_group, loop_max, neuron_count_list, rate)
# 4. 测试
# 4.1 创建测试样本
test_image_root_path = "./../picture/number_softmax_test"
test_image_root_path = os.path.abspath(test_image_root_path)
sample.create_sample(test_image_root_path, confuse=False)
# sample.create_sample_ex(2)
test_sx_list = sample.get_sx_list()
test_sy_list = sample.get_sy_list()
py_list = nn.predict(test_sx_list, test_sy_list)
print("\n")
print("py:\n")
count = len(py_list)
for i in range(0, count):
number = _get_max_index(test_sy_list[i])
print("\n")
print("index = %d, number = %d" % (i, number))
print(py_list[i])
''''''
def _get_max_index(y):
"""
获取 y 中第一个大于等于 0.9 的元素的索引(找不到则返回 -1)
:param y: 或者是 sy(训练样本输出),或者是 py(预测结果输出)
:return: y 中第一个大于等于 0.9 的元素的索引;若不存在,返回 -1
"""
row = y.shape[0]
for r in range(0, row):
# 如果大于等于 0.9,则认为是该元素对应的数字
if y[r][0] >= 0.9:
return r
# 如果没有大于等于0.9的,则 return -1
return -1
''''''
def test_poem():
# 1. 构建神经网络对象
# 古诗选择字符
ch = "白"
# 激活函数
# activation = Sigmoid()
activation = ReLU(20)
# 最后一跳激活函数
last_hop_activation = SoftMaxLHA()
# 损失函数
loss = CrossEntropyLoss()
# 神经网络
# nn = bp_nn.BPFNN(activation, last_hop_activation, loss)
nn = Recitation(activation, last_hop_activation, loss, ch)
# 2. 构建训练样本
# 训练样本对象
sample = OnePoemSample(ch)
sample.create_sample()
train_sx_group = sample.get_sx_group()
train_sy_group = sample.get_sy_group()
# 3. 训练
# 每一层网络的神经元个数
neuron_count_list = [10, 21]
# 最大循环训练次数
loop_max = 50
# 学习效率
rate = 0.1
# 训练
nn.train(train_sx_group, train_sy_group, loop_max, neuron_count_list, rate)
# 4. 测试
# 4.1 创建测试样本
test_sx = sample.create_test_sample(ch)
# 测试
py_list = list()
nn.predict_recurrent(test_sx, py_list)
# 将测试样本放在首位,这样就组成了一首完整的诗
py_list.insert(0, ch)
print("\n")
print("py_list:\n")
print(py_list)
```
#### File: code/cnn/mean_pooling.py
```python
from cnn.convolution import Convolution, CVLDim
"""
class:MeanPooling 平均汇聚
说明:
1、平均汇聚,也可以看作是一种特殊的卷积
2、卷积核大小:K * K, 步长:S * S
3、卷积的计算是 mean 函数
"""
class MeanPooling(Convolution):
"""
功能:计算 x 某一点(i, j)的卷积
参数:
x:输入信息
y:待赋值的卷积结果
i:x 的 width index
j:x 的 height index
d: x 的 depth index
返回值: x 某一点(i, j)的卷积
"""
def _cal_cvl_on_index(self, x, y, i, j, d):
# sum 的初值 = 0
tmp = 0
for u in range(0, self.w_width):
for v in range(0, self.w_height):
# 求和
tmp += self._x_value(x, i, j, u, v, d)
# 3维卷积
if CVLDim.THREE.value == self.cvl_dim:
y[i, j, d] = tmp / (self.w_width * self.w_height)
# 2维卷积
else:
y[i, j] = tmp / (self.w_width * self.w_height)
```
#### File: code/fnn/fnn_ex.py
```python
import os
import pickle
import numpy as np
from gl import errorcode
from gl.array_string import array_2_string
from gl.common_function import get_local_time, unserialize_train_para
from activation.normal_activation import Sigmoid
from activation.last_hop_activation import DichotomyLHA
from loss.loss import MSELoss
class FnnEx:
"""
分组训练的 FNN
"""
# 神经网络输入样本,向量维度
_sx_dim = 0
# 神经网络输出样本,向量维度
_sy_dim = 0
# 神经网络层数
_layer_count = 0
# 每一层神经元的数量
_neuron_count_list = None
# 每一层 w 参数
_w_layer = None
# 每一层 w 参数的 delta
_delta_w_layer = None
# 每一层 b 参数
_b_layer = None
# 每一层 b 参数的 delta
_delta_b_layer = None
# 每一层 w 参数的 shape list(除了卷积网络,这个参数没有意义)
_w_shape_layer = None
# 样本数量
_sample_count = 0
# 训练样本分组列表(输入)
_sx_group_list = None
# 训练样本分组列表(输出)
_sy_group_list = None
# 循环训练的最大次数
_loop_max = 1
# 学习效率
_rate = 0
# 激活函数对象(class Activation 的实例)
_activation = Sigmoid()
# 最后一跳激活函数对象(class LastHopActivation 的实例)
_last_hop_activation = DichotomyLHA()
# 损失函数
_loss = MSELoss()
# 记录训练参数文件路径
_para_file_path = 'gl/train_para/'
# 是否通过反序列化初始化训练参数
_init_from_unserialization = False
# 训练参数初始化时所乘以的系数
_alpha_para = 1
def __init__(self, activation=None, last_hop_activation=None, loss=None):
"""
构造函数
:param activation: 激活函数对象
:param last_hop_activation: 后一跳激活函数对象
:param loss: 损失函数对象
"""
if activation is not None:
self._activation = activation
if last_hop_activation is not None:
self._last_hop_activation = last_hop_activation
if loss is not None:
self._loss = loss
''''''
def train(self, sx_group_list, sy_group_list, loop_max, neuron_count_list, rate,
init_from_unserialization=False, alpha_para=1, w_shape_list=None):
"""
功能:神经网络训练\n
参数:\n
sx_group_list:分组训练样本输入列表\n
sy_group_list:分组训练样本输出列表\n
loop_max:循环训练的最大次数 \n
neuron_count_list:每一层神经元数量(对于卷积网络,这个参数没有意义)\n
rate:学习效率 \n
init_from_unserialization:是否通过反序列化初始化训练参数 \n
alpha_para:训练参数初始化时所乘以的系数 \n
w_shape_list:每一层 w 参数的 shape list(除了卷积网络,这个参数没有意义)\n
返回值:错误码\n
"""
# 1. 成员变量赋值
self._sx_group_list = sx_group_list
self._sy_group_list = sy_group_list
self._loop_max = loop_max
self._rate = rate
self._init_from_unserialization = init_from_unserialization
self._alpha_para = alpha_para
# 如果是卷积网络,这个参数没有意义(如果是卷积网络,直接传入 None 即可)
self._neuron_count_list = neuron_count_list
# 如果不是卷积网络,这个参数,没有意义(如果不是卷积网络,直接传入默认值即可)
self._w_shape_layer = w_shape_list
# 2. 校验
err = self._valid()
if errorcode.SUCCESS != err:
print("\nvalid error, errcode = %d\n" % err)
return err
# 3. 初始化 w, b,及其他参数
self._init_para()
# 4. 训练
return self._train()
''''''
def _valid(self):
"""
参数校验
:return: error code
"""
# 1. 校验每层神经元
err = self._valid_layer_neuron()
if errorcode.SUCCESS != err:
print("_valid_layer_neuron error, err = %d" % err)
return err
# 2. 输入样本与输出样本
err = self._valid_sample()
if errorcode.SUCCESS != err:
print("_valid_sample error, err = %d" % err)
return err
# 3. 最大循环训练次数,须 >= 1
if 1 > self._loop_max:
print("loop max error, loop_max = %d" % self._loop_max)
return errorcode.FAILED
return errorcode.SUCCESS
''''''
def _valid_layer_neuron(self):
"""
校验每层神经元
:return: error code
"""
# 1. 神经网络层数,须 >= 1
layer_count = len(self._neuron_count_list)
if 1 > layer_count:
return errorcode.FAILED
# 2. 每层的神经元个数,须 >= 1
for layer in range(0, layer_count):
count = self._neuron_count_list[layer]
if 1 > count:
return errorcode.FAILED
return errorcode.SUCCESS
''''''
def _valid_sample(self):
"""
校验训练样本
:return: error code
"""
# 1. 训练样本的输入和输出,分组数量须相同
len1 = len(self._sx_group_list)
len2 = len(self._sy_group_list)
if len1 != len2:
return errorcode.FAILED
# 2. 校验每一组训练样本
for i in range(0, len1):
err = self._valid_sample_sub(self._sx_group_list[i], self._sy_group_list[i])
if errorcode.SUCCESS != err:
return err
return errorcode.SUCCESS
''''''
def _valid_sample_sub(self, sx_list, sy_list):
"""
校验样本
:param sx_list: 输入样本列表
:param sy_list: 输出样本列表
:return: error code
"""
# 1. 输入样本的数量与输出样本的数量,须相同
len1 = len(sx_list)
len2 = len(sy_list)
if len1 != len2:
return errorcode.FAILED
# 2. 样本数量,须 >= 1
sample_count = len1
if 1 > sample_count:
return errorcode.FAILED
# 3. 样本向量维度
# 输入向量维度
sx_dim = len(sx_list[0])
# 输出向量维度
layer_count = len(self._neuron_count_list)
sy_dim = self._neuron_count_list[layer_count - 1]
# 3.1 输入样本/输出样本,向量维度 > 1
if (1 > sx_dim) or (1 > sy_dim):
return errorcode.FAILED
# 3.2 每一个输入/输出样本的向量维度
for i in range(0, sample_count):
shape_in = sx_list[i].shape
shape_out = sy_list[i].shape
# 输入样本的向量维度
if shape_in[0] != sx_dim:
return errorcode.FAILED
# 输入样本只能有1列(因为是个向量)
if shape_in[1] != 1:
return errorcode.FAILED
# 输出样本的向量维度
if shape_out[0] != sy_dim:
return errorcode.FAILED
# 输出样本只能有1列(因为是个向量)
if shape_out[1] != 1:
return errorcode.FAILED
return errorcode.SUCCESS
''''''
def _init_para(self):
"""
初始化参数
:return: error code
"""
# 神经网络输入,向量维度
sx_list = self._sx_group_list[0]
self._sx_dim = len(sx_list[0])
# 神经网络的层数
self._layer_count = len(self._neuron_count_list)
# 神经网络输出,向量维度
self._sy_dim = self._neuron_count_list[self._layer_count - 1]
# 初始化训练参数
return self._init_train_para()
''''''
def _init_train_para(self):
"""
初始化训练参数
:return: NULL
"""
# 初始化 w,b
self._init_w_b()
''''''
def _init_w_b(self):
"""
初始化 w,b 参数
:return: error code
"""
# 通过反序列化,初始化 w,b
if self._init_from_unserialization:
file_path = os.path.dirname(__file__) + "/../gl/train_para/"
self._w_layer, self._b_layer = unserialize_train_para(file_path, self._layer_count, u_flag=False)
# 通过随机值,初始化 w, b
else:
# 每一层 w、b 参数列表
self._w_layer = list()
self._b_layer = list()
# 第1层 w 参数,w 是一个2维数组
w = self._alpha_para * np.random.random((self._neuron_count_list[0], self._sx_dim))
self._w_layer.append(w)
# 第2层~第layer-1层 w 参数,w 是一个2维数组
for i in range(1, self._layer_count):
w = self._alpha_para * np.random.random((self._neuron_count_list[i], self._neuron_count_list[i - 1]))
self._w_layer.append(w)
# 第1层 ~ 第layer-1层 b 参数,b 是一个向量
for i in range(0, self._layer_count):
b = np.zeros([self._neuron_count_list[i], 1])
self._b_layer.append(b)
return errorcode.SUCCESS
''''''
def _train(self):
"""
训练
:return: NULL
"""
# 循环训练次数
loop = 0
# 打印开始时间
localtime = get_local_time()
print("\n\nbegin time = " + localtime + "\n")
while 1:
if loop >= self._loop_max:
# 打印结束时间
localtime = get_local_time()
print("end time = " + localtime + "\n")
# 打印最后一轮参数
# self._print_train_para(loop)
self._write_train_para(loop)
break
loop = loop + 1
# 分组训练
group_count = len(self._sx_group_list)
# 分组训练,训练每一个样本
for g in range(0, group_count):
# 1. 每一组训练之前,预备工作
self._pre_train()
# 2. 获取分组训练样本
sx_list = self._sx_group_list[g]
sy_list = self._sy_group_list[g]
sample_count = len(sx_list)
# 3. 初始化训练参数的 delta
self._init_train_para_delta()
# 4. 针对该组的每一个样本开始训练
for i in range(0, sample_count):
# 第 i 个训练样本
sx = sx_list[i]
sy = sy_list[i]
# 4.1 第 i 个训练样本,经过(多层)神经网络的计算
nn_y_list = self._calc_nn(sx)
# 4.2 最后一跳激活
nn_y = nn_y_list[len(nn_y_list) - 1]
last_hop_y = self._last_hop_activation.active_array(nn_y)
nn_y_list.append(last_hop_y)
# 4.3 根据神经网络计算结果,计算训练参数的 delta(比如:delta w, delta b)
self._calc_train_para_delta(nn_y_list, sx, sy)
# 4.4 一组样本计算完毕,修正训练参数(比如:w, b)
self._modify_train_para()
return errorcode.SUCCESS
''''''
def _pre_train(self):
"""
每一组训练之前预准备工作(一般来说,啥都不用做)
:return: NULL
"""
pass
''''''
def _calc_nn(self, sx):
"""
计算整个网络的输出
:param sx: 神经网络的输入
:return: 整个神经网络,每一层的输出
"""
x = sx
nn_y_list = list()
# 逐层计算
for layer in range(0, self._layer_count):
# 计算该层的输出
y = self._calc_layer(x, layer)
# 将该层的输出,记录下来
nn_y_list.append(y)
# 本层输出,等于下一层的输入
x = y
# 返回逐层计算的结果
return nn_y_list
''''''
def _calc_layer(self, x, layer):
"""
计算神经网络某一层的输出
:param x: 该层神经网络的输入,x 是一个向量
:param layer: 当前的层数
:return: y,该层神经网络的输出, y 是一个向量
"""
# 获取该层的参数:w, b
w = self._w_layer[layer]
b = self._b_layer[layer]
y = np.matmul(w, x) + b
y = y + self._calc_recurrent(layer)
# 激活
y = self._activation.active_array(y)
return y
''''''
def _calc_recurrent(self, layer):
"""
计算循环神经网络, u * h(t - 1) ,默认值是 0
:param layer: 层数
:return: u * h(t - 1)
"""
return 0
''''''
def _init_train_para_delta(self):
"""
初始化训练参数的 delta
:return: NULL
"""
self._delta_w_layer = list()
self._delta_b_layer = list()
for i in range(0, self._layer_count):
# _delta_w, _w 维度相同,初始值为 0
_delta_w = np.zeros(self._w_layer[i].shape)
self._delta_w_layer.append(_delta_w)
# _delta_b, _b 维度相同,初始值为 0
_delta_b = np.zeros(self._b_layer[i].shape)
self._delta_b_layer.append(_delta_b)
''''''
def _calc_train_para_delta(self, nn_y_list, sx, sy):
"""
计算神经网络训练参数的 delta
:param nn_y_list: 神经网络每一层的输出
:param sx: 训练样本(输入)
:param sy: 训练样本(输出)
:return: NULL
"""
pass
''''''
def _modify_train_para(self):
"""
根据训练参数的 delta,修正训练参数
:return: NULL
"""
pass
''''''
def _pre_predict(self):
"""
预测前的准备工作
:return: NULL
"""
pass
''''''
def predict(self, sx_list, revise_strong=False):
"""
神经网络预测
:param sx_list: 待预测的样本列表列表
:param revise_strong: 预测时,修正标记
:return: 预测结果
"""
# 预测前,先做个准备工作
self._pre_predict()
# 开始预测
count = len(sx_list)
py_list = list()
for i in range(0, count):
sx = sx_list[i]
nn_y_list = self._calc_nn(sx)
# 最后一层的 nn_y,才是神经网络的最终输出
nn_y = nn_y_list[len(nn_y_list) - 1]
# 最后一跳激活
lha_y = self._last_hop_activation.active_array(nn_y)
# 最后一跳修正
lhr_y = self._last_hop_activation.predict_revise(lha_y, revise_strong)
# 然后再添加到预测列表
py_list.append(lhr_y)
return py_list
''''''
def predict_recurrent(self, sx, py_list, max_recursion_count=30):
"""
循环(递归)预测
:param sx: 待预测样本
:param py_list: 预测结果
:param max_recursion_count: 最大递归次数
:return: NULL
"""
# 由于是递归调用,所以设置一个保护,防止死循环
count = len(py_list)
if count >= max_recursion_count:
return
# 因为是递归调用,所以预测前的准备工作,只放在第一次预测时
if 0 == count:
self._pre_predict()
nn_y_list = self._calc_nn(sx)
# 最后一层的 nn_y,才是神经网络的最终输出
nn_y = nn_y_list[len(nn_y_list) - 1]
# 最后一跳激活
lha_y = self._last_hop_activation.active_array(nn_y)
# 最后一跳修正
lhr_y = self._last_hop_activation.predict_revise(lha_y, revise_strong=True)
# 对修正后的结果,再处理一次
r_flag, ch, r_sx = self._handle_lhr(lhr_y)
# 将 recurrent_sx 加入 py_list
py_list.append(ch)
# 如果需要递归,则继续递归预测
if r_flag:
self.predict_recurrent(r_sx, py_list, max_recursion_count)
# 如果不需要递归,则啥都不做
else:
pass
''''''
def _handle_lhr(self, lhr_y):
"""
处理最后一跳修正后的输出
:param lhr_y: 最后一跳修正后的输出
:return: recurrent_flag,是否继续递归;recurrent_sx,如果递归,其 sx = recurrent_sx
"""
r_flag = False
ch = "None"
r_sx = None
return r_flag, ch, r_sx
''''''
def _print_train_para(self, loop):
"""
打印 w, b, loop
:param loop: 神经网络的训练轮次
:return: NULL
"""
# 新启一行
print("\n")
# 训练轮次
print("训练轮次 = %d\n" % loop)
# 训练参数
train_para_str = self._create_train_para_string()
print(train_para_str)
''''''
def _write_train_para(self, loop):
"""
将 w, b,loop 写入文件
:param loop: 神经网络的训练轮次
:return: NULL
"""
# 1. 将训练参数以字符串形式保存在文件中
self._write_train_para_string(loop)
# 2. 将训练参数序列化到文件
self._serialize_train_para()
''''''
def _write_train_para_string(self, loop):
"""
将训练参数以字符串形式保存在文件中
:param loop: 神经网络的训练轮次
:return: NULL
"""
# 训练参数字符串
train_para_str = ""
# 记录时间
localtime = get_local_time()
train_para_str += "time = " + localtime + "\n\n"
# 训练轮次
train_para_str += "训练轮次 = %d\n\n" % loop
# 训练参数
train_para_str += self._create_train_para_string()
# 写入文件
file_name = os.path.dirname(__file__) + "/../" + self._para_file_path + "train_para.txt"
with open(file_name, 'w', newline="\n", encoding='utf-8') as f:
f.write(train_para_str)
''''''
def _serialize_train_para(self):
"""
将训练参数序列化到文件
:return: NULL
"""
file_path = os.path.dirname(__file__) + "/../" + self._para_file_path
for layer in range(0, self._layer_count):
# w 参数文件名
w_file_name = file_path + "w%d" % layer
# 序列化 w
pickle.dump(self._w_layer[layer], open(w_file_name, 'wb'))
# b 参数文件名
b_file_name = file_path + "b%d" % layer
# 序列化 b
pickle.dump(self._b_layer[layer], open(b_file_name, 'wb'))
''''''
def _create_train_para_string(self):
"""
将训练参数转化为 string
:return: 训练参数转化后的 string
"""
# 训练参数字符串
train_para_str = ""
# 构建每一层训练参数的字符串
for layer in range(0, self._layer_count):
# loop
train_para_str += "layer = %d\n\n" % layer
# w
train_para_str += "w%d:\n\n" % layer
train_para_str += array_2_string(self._w_layer[layer])
# b
train_para_str += "\n\n"
train_para_str += "b%d:\n\n" % layer
train_para_str += array_2_string(self._b_layer[layer])
# 换行
train_para_str += "\n\n"
return train_para_str
''''''
def stub_set_para(self, sx_dim, neuron_count_list, w_layer, b_layer, activation):
"""
:param sx_dim: 神经网络输入,向量维度
:param neuron_count_list: 神经网络层数
:param w_layer: 每一层 w 参数 列表,w 是个 matrix
:param b_layer: 每一层 b 参数 列表,b 是个 vector
:param activation: 激活函数
:return: NULL
"""
# 神经网络输入,向量维度
self._sx_dim = sx_dim
# 每一层神经元的数量(Neuron Count)
self._neuron_count_list = neuron_count_list
# 神经网络层数
self._layer_count = len(w_layer)
# 每一层 w 参数,w 是个 matrix
self._w_layer = w_layer
# 每一层 b 参数,b 是个 vector
self._b_layer = b_layer
# 激活函数对象
self._activation = activation
```
#### File: code/gl/common_function.py
```python
import pickle
import time
import numpy as np
import random
"""
功能:计算正确率
参数:
py_list:预测结果列表
sy_list:样本结果列表
返回值:NULL
"""
def calculate_accuracy(py_list, sy_list):
# 合法性校验
c_py = len(py_list)
c_sy = len(sy_list)
if (c_py != c_sy) or (0 == c_py):
print("\n错误的参数,c_py = %d, c_sy = %d\n" % (c_py, c_sy))
return -1
# 计算正确率
count = c_py
accuracy = 0
for i in range(0, count):
py = py_list[i]
sy = sy_list[i]
if py[0, 0] == sy[0, 0]:
accuracy = accuracy + 1
else:
pass
return accuracy / count
"""
功能:随机化3维数组
参数:
width:3维数组的 width
height:3维数组的 height
depth:3维数组的 depth
返回值:随机数,数组
说明:因为 random.random() 的范围是 0~1 之间,所以 减去 0.5,使得范围变成 -0.5~0.5 之间
"""
def rand_array_3(width, height, depth):
array = np.zeros([width, height, depth])
for i in range(0, width):
for j in range(0, height):
for k in range(0, depth):
array[i, j, k] = random.random() - 0.5
return array
''''''
def get_local_time():
"""
获取当时的本地时间
:return: 当时的本地时间
"""
localtime = time.localtime()
localtime = time.strftime("%Y-%m-%d %H:%M:%S", localtime)
return localtime
''''''
def unserialize_train_para(file_path, layer_count, u_flag=False):
"""
从文件中反序列化 w,b, u 参数
:param file_path: 序列化文件所在路径
:param layer_count: 神经网络层数
:param u_flag: 是否反序列化 u 参数
:return: w_layer,b_layer,u_layer
"""
# 初始化
w_layer = list()
b_layer = list()
u_layer = list()
# 逐层反序列化
for i in range(0, layer_count):
# w
file_name = file_path + "w%d" % i
w = pickle.load(open(file_name, 'rb'))
w_layer.append(w)
# b
file_name = file_path + "b%d" % i
b = pickle.load(open(file_name, 'rb'))
b_layer.append(b)
# u
if u_flag:
file_name = file_path + "u%d" % i
u = pickle.load(open(file_name, 'rb'))
u_layer.append(u)
# return 反序列化结果
if u_flag:
return w_layer, b_layer, u_layer
else:
return w_layer, b_layer
```
#### File: code/gl/matrix_list.py
```python
import numpy as np
def list_2_matrix(lst):
"""
将 list 转换为 matrix, row = lst.len, col = 1
:param lst: 待转换的 list
:return: 转换后的 matrix
"""
count = len(lst)
arr = np.zeros([count, 1])
for i in range(0, count):
arr[i][0] = lst[i]
return arr
def matrix_2_list(arr):
"""
将 matrix 转换为 list, arr 是 [row, 1] 矩阵
:param arr:
:return: 转换后的 list
"""
shape = arr.shape
row = shape[0]
lst = list()
for i in range(0, row):
lst.append(arr[i][0])
return lst
```
#### File: code/rnn/rnn_ex.py
```python
import os
import pickle
import numpy as np
from bp.bp_nn_ex import BPFnnEx
from gl.array_string import array_2_string
from gl.common_function import unserialize_train_para
from gl.matrix_list import matrix_2_list
class RnnEx(BPFnnEx):
"""
循环神经网络 \n
特别说明: \n
1、h(t) = f(u * h(t - 1) + (w * x + b)) \n
2、隐藏层也有多层,那么对于 u * h(t - 1) 而言,它该是几层呢? \n
3、先假设 u * h(t - 1) 只作用于第1层? \n
"""
# 每一层 u 参数
_u_layer = None
# 每一层 b 参数的 delta
_delta_u_layer = None
# 隐藏层 h(t) 输出,时间序列
_hidden_out_sequence = None
# 训练样本(输入),时间序列
_sx_list = None
# RNN 只作用于第1层的标记
_rnn_layer_0 = True
''''''
def _init_train_para(self):
"""
初始化训练参数
:return: NULL
"""
# 通过反序列化,初始化 w,b, u
if self._init_from_unserialization:
file_path = os.path.dirname(__file__) + "/../gl/train_para/"
self._w_layer, self._b_layer, self._u_layer = \
unserialize_train_para(file_path, self._layer_count, u_flag=True)
# 通过随机数,初始化 w, b, u
else:
# 先调用父类的 _init_train_para
super()._init_train_para()
# 初始化 _u_layer(每一层 u 参数)
self._u_layer = list()
# 虽然本 class 暂时只实现第一层的 rnn,但是 u 参数还是每一层都做个初始化
for i in range(0, self._layer_count):
u = self._alpha_para * np.random.random((self._neuron_count_list[i], self._neuron_count_list[i]))
self._u_layer.append(u)
''''''
def _pre_train(self):
"""
每一组训练之前预准备工作
:return: NULL
"""
# 重新初始化 隐藏层 h(t) 输出,时间序列
self._hidden_out_sequence = list()
# 重新初始化 训练样本(输入),时间序列
self._sx_list = list()
''''''
def _pre_predict(self):
"""
预测前的准备工作
:return: NULL
"""
# 重新初始化 隐藏层 h(t) 输出,时间序列
self._hidden_out_sequence = list()
# 重新初始化 训练样本(输入),时间序列
self._sx_list = list()
''''''
def _calc_nn(self, sx):
"""
计算整个网络的输出
:param sx: 神经网络的输入
:return: 整个神经网络,每一层的输出
"""
# 调用父类计算神经网络的输出
nn_y_list = super()._calc_nn(sx)
# 将神经网络的输出记录下来(时间序列)
self._hidden_out_sequence.append(nn_y_list)
# 将神经网络的训练样本(输入)记录下来(时间序列)
self._sx_list.append(sx)
return nn_y_list
''''''
def _calc_recurrent(self, layer):
"""
计算循环神经网络, u * h(t - 1) ,默认值是 0
:param layer: 层数
:return: u * h(t - 1)
"""
# 如果只计算第1层,如果层数超过第1层,则直接 return 0
if self._rnn_layer_0:
if layer > 0:
return 0
# 其他情形,则计算 recurrent
# 时间序列长度
T = len(self._hidden_out_sequence)
# 0 == T,意味着是 t0 时刻,此时还不存在前一个状态
if 0 == T:
return 0
# 此时意味着,存在前一状态
else:
nn_y_list_pre_time = self._hidden_out_sequence[T - 1] # 前一状态的神经网络各层的输出
nn_y_pre_time = nn_y_list_pre_time[layer] # 前一状态的神经网络第 layer 层的输出
u = self._u_layer[layer] # 该层的 u 参数
uy = np.matmul(u, nn_y_pre_time)
return uy
''''''
def _init_train_para_delta(self):
"""
初始化训练参数的 delta
:return: NULL
"""
# 1. 调用父类,初始化 delta_w, delta_b
super()._init_train_para_delta()
# 2. 初始化 delta_u
self._delta_u_layer = list()
for i in range(0, self._layer_count):
# _delta_u, _u 维度相同,初始值为 0
_delta_u = np.zeros(self._u_layer[i].shape)
self._delta_u_layer.append(_delta_u)
''''''
def _calc_train_para_delta(self, nn_y_list, sx, sy):
"""
计算神经网络训练参数的 delta
:param nn_y_list: 神经网络每一层的输出
:param sx: 训练样本(输入)
:param sy: 训练样本(输出)
:return: NULL
"""
# 1. 调用父类,计算纵向 delta w, delta b
ksi_list = super()._calc_train_para_delta(nn_y_list, sx, sy)
# 2. BPTT 算法,计算 eta_list (暂时只计算第一层(layer = 0)的 eta)
eta_list = self._bptt(ksi_list, layer=0)
# 3. 根据 eta_list 计算 delta u,并且再度计算 delta w, delta b
self._calc_delta_wbu(eta_list, layer=0)
''''''
def _modify_train_para(self):
"""
根据训练参数的 delta,修正训练参数
:return: NULL
"""
# 调用父类函数,修正每一层的 w, b
super()._modify_train_para()
# 修正第0层的 u 参数(暂时只修正第0层)
for layer in range(0, 1):
self._u_layer[layer] -= self._rate * self._delta_u_layer[layer]
''''''
def _bptt(self, ksi_list, layer=0):
"""
随时间反向传播(backpropagation through time, bptt),计算沿着时间轴的 eta_list
:param ksi_list: 当前时间轴的每一层的 ksi 列表
:param layer: 计算某一层的 bptt, layer 默认值是0
:return: eta_list
"""
# 1. 当前时刻
cur_t = len(self._hidden_out_sequence)
# 如果当前是 t0 时刻(cur_t = 1),则无须 bptt
if cur_t <= 1:
return None
# 2. eta_list 初始化
eta_list = [0] * (cur_t - 1)
# 3. 按照时间反向传播,计算 eta
# 3.1 eta_last,等于该层纵向的 ksi
eta_last = ksi_list[layer]
# 3.2 该层(layer)的 u 参数的转置(u.T)
uT = self._u_layer[layer].T
# 3.3 反向计算该层(layer)的 eta
eta_pre = eta_last
for t in range((cur_t - 2), -1, -1):
# 本时刻隐藏层的输出
hidden_out = self._hidden_out_sequence[t][layer]
# 本时刻隐藏层输出的导数
dh = self._activation.derivative_array(hidden_out)
# 将导数变为对角线矩阵
diag_dh = np.diag(matrix_2_list(dh))
# 计算 eta
eta = np.matmul(uT, eta_pre)
eta = np.matmul(diag_dh, eta)
# 存储 delta
eta_list[t] = eta
# 递归(循环)
eta_pre = eta
# 返回 eta_list
return eta_list
''''''
def _calc_delta_wbu(self, eta_list, layer=0):
"""
根据 eta_list 计算 delta u,并且再度计算 delta w, delta b
:param eta_list: 随时间反向传播(backpropagation through time, bptt),计算出沿着时间轴的 eta_list
:return: NULL
"""
# 如果 eta_list = None, 说明是 t0 时刻,此时还不需要计算 delta u/w/b
if eta_list is None:
return
# 当前时刻
cur_t = len(self._hidden_out_sequence)
t = cur_t - 2
dw = np.matmul(eta_list[t], self._sx_list[t].T)
db = eta_list[t]
du = np.matmul(eta_list[t], self._hidden_out_sequence[t][layer].T)
# 计算 delta w, delta b, delta u
self._delta_w_layer[layer] += dw
self._delta_b_layer[layer] += db
self._delta_u_layer[layer] += du
''''''
def _create_train_para_string(self):
"""
将训练参数转化为 string
:return: 训练参数转化后的 string
"""
# 这段代码写的不好,与父类有大量重复,后续再优化吧
# 训练参数字符串
train_para_str = ""
# 再补上 u 的字符串
for layer in range(0, self._layer_count):
# loop
train_para_str += "layer = %d\n\n" % layer
# w
train_para_str += "w%d:\n\n" % layer
train_para_str += array_2_string(self._w_layer[layer])
# b
train_para_str += "\n\n"
train_para_str += "b%d:\n\n" % layer
train_para_str += array_2_string(self._b_layer[layer])
# u
train_para_str += "\n\n"
train_para_str += "u%d:\n\n" % layer
train_para_str += array_2_string(self._u_layer[layer])
# 换行
train_para_str += "\n\n"
return train_para_str
''''''
def _serialize_train_para(self):
"""
将训练参数序列化到文件
:return: NULL
"""
# 先调用父类,序列化 w, b
super()._serialize_train_para()
# 再序列化 u
file_path = os.path.dirname(__file__) + "/../" + self._para_file_path
for layer in range(0, self._layer_count):
# u 参数文件名
u_file_name = file_path + "u%d" % layer
# 序列化 u
pickle.dump(self._u_layer[layer], open(u_file_name, 'wb'))
''''''
def stub_set_para(self, neuron_count_list, w_layer, b_layer, u_layer):
"""
设置神经网络参数:w, b, u
:param neuron_count_list: 神经网络层数
:param w_layer: 每一层 w 参数 列表
:param b_layer: 每一层 b 参数 列表
:param u_layer: 每一层 u 参数 列表
:return: NULL
"""
# 每一层神经元的数量(Neuron Count)
self._neuron_count_list = neuron_count_list
# 神经网络层数
self._layer_count = len(w_layer)
# 每一层 w 参数
self._w_layer = w_layer
# 每一层 b 参数
self._b_layer = b_layer
# 每一层 u 参数
self._u_layer = u_layer
```
#### File: code/rnn/rnn_poet.py
```python
from gl.matrix_list import matrix_2_list, list_2_matrix
from gl.poem_encoder import PoemEncoder
from rnn.rnn_ex import RnnEx
class Poet(RnnEx):
"""
通过循环神经网络,写诗(或者其他文字)
"""
# 汉字编码解码器
_hanzi_encoder = PoemEncoder.instance()
''''''
def _handle_lhr(self, lhr_y):
"""
处理最后一跳修正后的输出
:param lhr_y: 最后一跳修正后的输出
:return: recurrent_flag,是否继续递归;recurrent_sx,如果递归,其 sx = recurrent_sx
"""
# 将矩阵 lhr_y 转成 list
lst = matrix_2_list(lhr_y)
# 解码
ch = self._hanzi_encoder.decode(lst)
# 如果 ch == END,那么结束递归
if self._hanzi_encoder.is_end(ch):
r_flag = False
r_sx = None
# 否则,递归下去
else:
# 将 ch 编码
r_sx = self._hanzi_encoder.encode(ch)
# 将 r_sx 转换为矩阵
r_sx = list_2_matrix(r_sx)
r_flag = True
return r_flag, ch, r_sx
```
#### File: sample/image_created/number_image_created.py
```python
from PIL import Image
import random
import os
from PIL import ImageDraw
from PIL import ImageFont
class NumberImageCreated:
"""
1、暂时只生成 0~9 图像\n
2、图像是 RGB,字体颜色是黑色,底色是灰度\n
3、生成图像的目录,暂时写死\n
"""
# sx image width
sx_width = 0
# sx image height
sx_height = 0
# sy image width
sy_width = 0
# sy image height
sy_height = 0
# image path
image_path = "./../../picture/number2"
# 图像格式
image_format = "bmp"
# min number(最小的数字,0)
min_number = 0
# max number(最大的数字,9)
max_number = 9
# 每个数字生成的 sx 样本数量
sx_count_per_number = 1000
# 黑色
black = 0
# 白色
white = 255
# 字体
font = "Arial.ttf"
# 字体大小
font_size = 10
"""
功能:构造函数
参数:TODO 补充
返回值:NULL
"""
def __init__(self, sx_width, sx_height, sy_width, sy_height):
self.sx_width = sx_width
self.sx_height = sx_height
self.sy_width = sy_width
self.sy_height = sy_height
"""
功能:生成数字图像(文件)
参数:TODO 补充
返回值:NULL
"""
def create_image(self, image_path=None, sx_count_per_number=1000):
# 1. 参数赋值
if image_path is not None:
self.image_path = image_path
self.sx_count_per_number = sx_count_per_number
# 2. 生成图像
for i in range(self.min_number, self.max_number + 1):
# 2.1 生成 sx init 图像
self._create_sx_init_image(i)
# 2.2 生成 sx 图像
for j in range(0, self.sx_count_per_number):
self._create_sx_image(i, j)
# 2.3 生成 sy 图像
self._create_sy_image(i)
"""
功能:生成数字图像(文件)目录
参数:TODO 补充
返回值:NULL
"""
def _create_image_path(self, number):
# 图像路径
path = self.image_path + "/" + str(number)
# 判断路径是否存在
if os.path.exists(path):
return
# 如果不存在,则创建路径
else:
os.makedirs(path)
"""
功能:生成数字 sx 原始图像
参数:TODO 补充
返回值:NULL
"""
def _create_sx_init_image(self, number):
# 图像路径
path = self.image_path + "/" + str(number)
# 文件名
file_name = path + "/" + "sx_" + str(number) + "_init" + ".bmp"
# image width
width = self.sx_width
# image height
height = self.sx_height
# 是否有噪声(无噪声)
noise = False
# 创建图像文件
self._create_image(file_name, width, height, number, noise)
"""
功能:生成数字 sx 图像
参数:TODO 补充
返回值:NULL
"""
def _create_sx_image(self, number, count):
# 图像路径
path = self.image_path + "/" + str(number)
# 文件名
file_name = path + "/" + "sx_" + str(number) + "_" + str(count) + ".bmp"
# image width
width = self.sx_width
# image height
height = self.sx_height
# 是否有噪声(无噪声)
noise = True
# 创建图像文件
self._create_image(file_name, width, height, number, noise)
"""
功能:生成数字 sy 图像
参数:TODO 补充
返回值:NULL
"""
def _create_sy_image(self, number):
# 图像路径
path = self.image_path + "/" + str(number)
# sx 文件名
sx_file_name = path + "/" + "sx_" + str(number) + "_init" + ".bmp"
# sy 文件名
sy_file_name = path + "/" + "sy_" + str(number) + ".bmp"
# sx image
sx_img = Image.open(sx_file_name)
# sy image(裁剪 sx image)
# left = (self.sx_width - self.sy_width) / 2
left = 0
# top = (self.sx_height - self.sy_height) / 2
top = 0
right = left + self.sy_width
bottom = top + self.sy_height
sy_img = sx_img.crop([left, top, right, bottom])
# 保存
sy_img.save(sy_file_name, format=self.image_format)
sx_img.close()
"""
功能:生成数字 sx 原始图像
参数:TODO 补充
返回值:NULL
"""
def _create_image(self, file_name, width, height, number, noise):
# 1. 新建一个图像(RGB, 白底)
image = Image.new('RGB', (width, height), self._white_color())
# 2. 创建 draw 对象:
draw = ImageDraw.Draw(image)
# 3. 创建 font 对象:
# font = ImageFont.truetype(self.font, self.font_size)
# 4. 填充每个像素(噪声):
if noise:
for x in range(0, width):
for y in range(0, height):
draw.point((x, y), fill=self._noise_color())
# 5. 输出文字:
x = width / 3 + 1
y = height / 3 - 1
# draw.text((x, y), str(number), font=font, fill=self._black_color())
draw.text((x, y), str(number), fill=self._black_color())
# 6. 保存文件
image.save(file_name, format=self.image_format)
"""
功能:黑色
参数:NULL
返回值:RGB 三元组(黑色)
"""
def _black_color(self):
return self.black, self.black, self.black
"""
功能:白色
参数:NULL
返回值:RGB 三元组(黑色)
"""
def _white_color(self):
return self.white, self.white, self.white
"""
功能:噪声的颜色
参数:NULL
返回值:RGB 三元组(随机灰色)
"""
def _noise_color(self):
# 随机数
noise = random.random() * (self.white - 200) + 200
# 四舍五入取整
noise = round(noise)
return noise, noise, noise
"""
功能:删除数字图像(文件)
参数:TODO 补充
返回值:NULL
"""
def del_image(self):
for root, dirs, files in os.walk(self.image_path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
"""
功能:测试 NumberImageSample,创建 image(文件)
参数:NULL
返回值:NULL
"""
def test_create_image():
nis = NumberImageCreated(20, 20, 16, 16)
nis.create_image(None, 1000)
"""
功能:删除 image(文件)
参数:NULL
返回值:NULL
"""
def test_del_image():
nis = NumberImageCreated(20, 20, 16, 16)
nis.del_image()
```
#### File: code/sample/image_softmax_sample.py
```python
import numpy as np
import os
from gl.common_enum import ArrayDim
from my_image import my_image
from my_image.my_image import gray_file
from sample.fully_connected_sample import FullConnectedSample
class ImageSoftMaxSample(FullConnectedSample):
"""
1、暂时只使用 0~9 图像,进行图像识别\n
2、图像的目录,暂时写死\n
"""
def create_sample(self, image_root_path, confuse=True):
"""
功能:创建样本\n
参数:\n
image_root_path:图像样本的根目录\n
confuse:是否混淆样本顺序\n
返回值:NULL\n
"""
# 1. 初始化
self._sx_list = list()
self._sy_list = list()
# 2. 构建 sx_list, sy_list
# 获取 image_path 下所有文件
for root, dirs, files in os.walk(image_root_path, topdown=True):
group_count = len(dirs)
if 0 == group_count:
continue
else:
for directory in dirs:
image_file_path = os.path.join(root, directory)
index = int(directory) # directory 以 数字命名
# 创建样本
self._create_sample(image_file_path, index)
# 样本混淆
if confuse:
self._confuse(group_count)
return # 直接 return,不再继续创建样本
''''''
def _create_sample(self, image_file_path, index):
"""
创建样本
:param image_file_path:图像文件路径
:param index:图像文件路径所对应的数字
:return:NULL
"""
# 获取 image_file_path 下所有文件
for root, dirs, files in os.walk(image_file_path, topdown=False):
for name in files:
image_file_name = os.path.join(root, name)
# 1 构建 sx
sx = self._create_sx(image_file_name)
self._sx_list.append(sx)
# 2 构建 sy
sy = self._create_sy(index)
self._sy_list.append(sy)
''''''
@staticmethod
def _create_sx(image_file_name):
"""
功能:将一个图像文件转换为训练样本的输入 \n
参数:\n
image_file_name:图像文件名 \n
返回值:sx \n
"""
# 取图像灰度值
gray, err = gray_file(image_file_name, ArrayDim.THREE)
# 将图像数据中的0转换为极小值
my_image.array_0_tiny(gray)
# 归一化
gray = my_image.normalize(gray, my_image.NormalizationType.NORMAL)
# 将灰度图像值,从3维数组转变为1维数组(list)
gray = my_image.array_3_1(gray)
gray = gray / 40
return gray
''''''
@staticmethod
def _create_sy(index):
"""
通过解析图像文件名,构建为训练样本的输出
:return: 训练样本的输出
"""
# sy 是一个 10 维向量
sy = np.zeros([10, 1])
sy[index][0] = 1
return sy
''''''
def _confuse(self, group_count):
"""
将训练样本的顺序混淆一下
:return:NULL
"""
sx_list = list()
sy_list = list()
count = int(len(self._sx_list) / group_count)
for i in range(0, count):
for j in range(0, group_count):
index = j * count + i
sx_list.append(self._sx_list[index])
sy_list.append(self._sy_list[index])
self._sx_list = sx_list
self._sy_list = sy_list
''''''
def create_sample_ex(self, count):
"""
认为构建样本,样本的可区分度强
:return:NULL
"""
self._sx_list = list()
self._sy_list = list()
for i in range(0, count):
sx_0 = 0.5 * np.random.random((400, 1))
sx_0 = sx_0 / 40
self._sx_list.append(sx_0)
sy_0 = np.zeros([10, 1])
sy_0[0][0] = 1
self._sy_list.append(sy_0)
sx_1 = 0.5 * np.random.random((400, 1)) + 0.5
sx_1 = sx_1 / 40
self._sx_list.append(sx_1)
sy_1 = np.zeros([10, 1])
sy_1[1][0] = 1
self._sy_list.append(sy_1)
```
#### File: code/sample/points_sample.py
```python
import numpy as np
import random
from gl import draw
from sample.fully_connected_sample import FullConnectedSample
class PointsSample(FullConnectedSample):
"""
1、点训练样本,base class \n
1、训练输入样本是一系列的点(坐标值) \n
2、训练输出样本可以是分类,也可以是其他 \n
"""
# 样本,输入向量,每个元素的最大值
_sx_max = 0
''''''
def create_sample(self, sample_count, sx_max, sx_dim, sy_dim):
"""
功能:创建样本\n
参数:\n
sample_count:样本数量\n
sx_max:样本,输入向量,每个元素的最大值\n
sx_dim:样本,输入向量的维度\n
sy_dim:样本,输出向量的维度\n
返回值:NULL\n
"""
# 1. 初始化
self._sample_count = sample_count
self._sx_max = sx_max
self._sx_dim = sx_dim
self._sy_dim = sy_dim
# 2. 创建训练样本,输入
self._create_sx_list()
# 3. 创建训练样本,输出
self._create_sy_list()
"""
功能:创建样本,输入
参数:NULL
返回值:NULL
"""
def _create_sx_list(self):
# 初始化 sx_list
self._sx_list = list()
# 创建 sample_count 个训练样本输入,sx
for i in range(0, self._sample_count):
# sx 是一个 [sx_dim, 1] 的矩阵
sx = np.empty([self._sx_dim, 1])
for j in range(0, self._sx_dim):
"""
1、默认采用随机数创建
2、random.random() 是介于 (0, 1) 之间的一个随机数(记为 r)
3、r 减去 0.5,是为了构建随机的正负数,其范围是 (-0.5, 0,5)
4、所以,需要乘以2,再乘以 max
"""
sx[j][0] = (random.random() - 0.5) * 2 * self._sx_max[j]
self._sx_list.append(sx)
"""
功能:创建样本,输出
参数:NULL
返回值:NULL
说明:这是一个虚函数,待子类重载
"""
def _create_sy_list(self):
pass
"""
功能:画出样本
参数:NULL
返回值:NULL
"""
def draw_sample(self, title):
# 初始化
draw.init_draw(title)
# 画样本点
draw.draw_points(self._sx_list, self._sy_list)
# 画分割(线)
self.draw_segment()
# 显示图像
draw.show()
"""
功能:画分割(线)
参数:NULL
返回值:NULL
说明:这是一个虚函数,待子类重载
"""
def draw_segment(self):
pass
"""
创建固定样本
"""
def create_sample_stub(self):
# 2. 创建训练样本,输入
self._create_sx_list_stub()
# 3. 创建训练样本,输出
self._create_sy_list()
"""
功能:创建固定样本,输入
参数:NULL
返回值:NULL
"""
def _create_sx_list_stub(self):
pass
```
#### File: code/sample/sin_sample.py
```python
import numpy as np
from matplotlib import pyplot as plt
from activation import dichotomy
from sample.points_sample import PointsSample
"""
class:SinSample
说明:
1、重载两个函数: _create_sy_list, draw_segment
2、对于 SinSample 而言,其输入只可能是2个维度的向量,其输出只可能是1个维度的向量
3、所以代码中,针对输入输出的向量维度,有很多写死的地方
"""
class SinSample(PointsSample):
# 欧米咖:sin(wx)
__omega = 2 * np.pi
"""
功能:重载父类的 _create_sy_list
参数:NULL
返回值:NULL
说明:
1、对于 SinSample 而言,其输入只可能是2个维度的向量,其输出只可能是1个维度的向量
2、所以代码中,针对输入输出的向量维度,直接写死
"""
def _create_sy_list(self):
# 1. 初始化
self._sy_list = list()
# 2. 构建 sy_list,sy 是分类 C1 or C2
for i in range(0, self._sample_count):
# sx 是一个 [2, 1] 的矩阵
sx = self._sx_list[i]
x0 = sx[0][0] # 对应到坐标系的 x
x1 = sx[1][0] # 对应到坐标系的 y
# sy 是一个 [1, 1] 的矩阵
sy = np.empty([1, 1])
# 计算 sin(x0)
sin_x0 = self._sx_max[1] * np.sin(self.__omega * x0)
# 比较
if x1 >= sin_x0:
sy[0][0] = dichotomy.Dichotomy.C1.value
else:
sy[0][0] = dichotomy.Dichotomy.C2.value
self._sy_list.append(sy)
"""
功能:重载父类的 draw_segment
参数:NULL
返回值:NULL
"""
def draw_segment(self):
# 1. 绘制 sin(x) 图像
x = np.linspace(-self._sx_max[0], self._sx_max[0], 300)
y = self._sx_max[1] * np.sin(self.__omega * x)
plt.plot(x, y, color='blue', linewidth=1.0)
```
#### File: code/sample/two_line_sample.py
```python
import numpy as np
from matplotlib import pyplot as plt
from activation import dichotomy
from sample.points_sample import PointsSample
"""
class:TwoLineSample
说明:
1、重载两个函数: _create_sy_list, draw_segment
2、对于 TwoLineSample 而言,其输入只可能是2个维度的向量,其输出只可能是1个维度的向量
3、所以代码中,针对输入输出的向量维度,有很多写死的地方
"""
class TwoLineSample(PointsSample):
# a0, b0
__a0 = 1
__b0 = 0.5
# a1, b1
__a1 = 1
__b1 = -0.5
"""
功能:重载父类的 _create_sy_list
参数:NULL
返回值:NULL
说明:
1、对于 TwoLineSample 而言,其输入只可能是2个维度的向量,其输出只可能是1个维度的向量
2、所以代码中,针对输入输出的向量维度,直接写死
"""
def _create_sy_list(self):
# 1. 初始化
self._sy_list = list()
# 2. 构建 sy_list,sy 是分类 C1 or C2
for i in range(0, self._sample_count):
# sx 是一个 [2, 1] 的矩阵
sx = self._sx_list[i]
x0 = sx[0][0] # 对应到坐标系的 x
x1 = sx[1][0] # 对应到坐标系的 y
# sy 是一个 [1, 1] 的矩阵
sy = np.empty([1, 1])
sl0_x0 = self.__a0 * x0 + self.__b0
sl1_x1 = self.__a1 * x0 + self.__b1
# 比较
if (x1 >= sl0_x0) or (x1 <= sl1_x1):
sy[0][0] = dichotomy.Dichotomy.C1.value
else:
sy[0][0] = dichotomy.Dichotomy.C2.value
self._sy_list.append(sy)
"""
功能:重载父类的 draw_segment
参数:NULL
返回值:NULL
"""
def draw_segment(self):
# 1. 绘制两根直线
"""
# plt.axhline(self.h0, color='blue', linewidth=1.0)
# plt.axhline(self.h1, color='blue', linewidth=1.0)
"""
x = np.arange(-(self._sx_max[0]), (self._sx_max[0] + 0.5))
y = self.__a0 * x + self.__b0
plt.plot(x, y, color='blue', linewidth=1.0)
x = np.arange(-(self._sx_max[0]), (self._sx_max[0] + 0.5))
y = self.__a1 * x + self.__b1
plt.plot(x, y, color='blue', linewidth=1.0)
"""
功能:创建固定样本,输入
参数:NULL
返回值:NULL
"""
def _create_sx_list_stub(self):
#
self._sx_max = list()
self._sx_max.append(1)
self._sx_max.append(1)
# 样本数量
self._sample_count = 5
# 初始化 sx_list
self._sx_list = list()
sx = np.empty([2, 1])
sx[0, 0] = 0
sx[1, 0] = 0.8
self._sx_list.append(sx)
sx = np.empty([2, 1])
sx[0, 0] = 0
sx[1, 0] = -0.8
self._sx_list.append(sx)
sx = np.empty([2, 1])
sx[0, 0] = 0.5
sx[1, 0] = 0.5
self._sx_list.append(sx)
sx = np.empty([2, 1])
sx[0, 0] = -0.5
sx[1, 0] = -0.5
self._sx_list.append(sx)
sx = np.empty([2, 1])
sx[0, 0] = 0
sx[1, 0] = 0
self._sx_list.append(sx)
``` |
{
"source": "1801Python/flask_demo",
"score": 2
} |
#### File: app/controllers/user.py
```python
def get_user():
return [dict(name='张三', age=18), dict(name='李四', age=20)]
``` |
{
"source": "180254/scheduled-events-operator-2",
"score": 2
} |
#### File: 180254/scheduled-events-operator-2/seoperator2.py
```python
import abc
import collections
import email.utils
import functools
import inspect
import json
import os
import pickle
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import urllib.error
import urllib.parse
import urllib.request
from datetime import datetime, timezone
from fnmatch import fnmatch
from typing import Any, Dict, Generic, Iterable, Iterator, List, Optional, Tuple, Type, TypeVar
T = TypeVar("T")
# A simple decorator that can be used to wrap a function so that it is retried with a backoff strategy.
def retry(
exceptions: Optional[Tuple[Type[BaseException], ...]] = None,
delays: Optional[List[float]] = None):
# Retry on HTTP connection and data-processing errors by default.
if exceptions is None:
exceptions = (urllib.error.URLError, urllib.error.HTTPError, ValueError)
# The default backoff strategy.
if delays is None:
delays = [0.5, 1, 1, 3, 5]
def retry_decorator(f):
@functools.wraps(f)
def true_retry_decorator(*args, **kwargs):
i = 0
while True:
try:
return f(*args, **kwargs)
except exceptions as e:
# Set eventid in log. That customization is a bit hacky, but it's simple and enough.
print_kwargs = {}
for idx, val in enumerate(inspect.getfullargspec(f).args):
if val == "event":
event = args[idx]
if hasattr(event, 'eventid'):
print_kwargs["eventid"] = event.eventid
break
func_name = f.__qualname__
exception_name = e.__class__.__qualname__
if i >= len(delays):
print_(f"Function {func_name} failed due to {exception_name}. "
f"Mo more retries, it was the last attempt.",
**print_kwargs)
raise e
delay = delays[i]
print_(f"Function {func_name} failed due to {exception_name}. "
f"Retrying in {delay}s",
**print_kwargs)
# Todo: instead of time.sleep, use exit_threading_event from LifeManager.
# Is it possible to do this without singleton/global variable/etc?
# Which is the ok solution?
# if life_manager.exit_threading_event.wait(delay):
# print_(f"Function {func_name} failed due to {exception_name}. "
# "Retry cancelled due to ongoing graceful shutdown.",
# **print_kwargs)
# raise e
time.sleep(delay)
i += 1
return true_retry_decorator
return retry_decorator
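# Illustrative usage sketch (added; not part of the original script, names are hypothetical):
# @retry(exceptions=(urllib.error.URLError,), delays=[1, 2, 4])
# def _fetch_metadata_example(url: str) -> bytes:
#     with urllib.request.urlopen(url, timeout=5) as response:
#         return response.read()
# Such a call is attempted up to four times, sleeping 1s, 2s and 4s between attempts
# before the last exception is re-raised.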
# Serialize arbitrary Python objects to JSON.
# Fixes: TypeError: Object of type Xyz is not JSON serializable
# Fix consists of JsonSerializable, JsonSerializableEncoder.
class JsonSerializable(abc.ABC):
@abc.abstractmethod
def to_json(self) -> Any:
pass
def __str__(self) -> str:
return str(self.to_json())
def __repr__(self) -> str:
return (f"{self.__class__.__name__}("
f"{repr(self.to_json())})")
# Take a look at the documentation of JsonSerializable.
# https://docs.python.org/3/library/json.html#json.JSONEncoder.default
class JsonSerializableEncoder(json.JSONEncoder):
def default(self, o: Any):
if isinstance(o, JsonSerializable):
return o.to_json()
# Let the base class default method raise the TypeError
return json.JSONEncoder.default(self, o)
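# Illustrative usage sketch (added): any object implementing JsonSerializable can be passed
# straight to json.dumps as long as the encoder is supplied, e.g.
#   json.dumps({"events": events}, cls=JsonSerializableEncoder)
# which is exactly what print_ below relies on for its structured log lines.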
# Print in a version that produces output containing json.
def print_(message: str, **kwargs) -> None:
timestamp = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
print(json.dumps({"timestamp": timestamp, "message": message, **kwargs}, cls=JsonSerializableEncoder), flush=True)
# An interface that provides methods to "cache" (save) state.
# The state is stored on disk, under the specified key.
class Cacheable(Generic[T]):
def __init__(self, cache_dir: str) -> None:
super().__init__()
self._cache_dir: str = cache_dir
self._cache_enabled: bool = os.path.isdir(cache_dir)
def _cache_read(self, key: str, default_value: T) -> T:
if not self._cache_enabled:
return default_value
cache_file = os.path.join(self._cache_dir, key)
try:
with open(cache_file, "rb") as handle:
return pickle.load(handle)
except (OSError, IOError):
return default_value
def _cache_write(self, key: str, value: T) -> None:
if not self._cache_enabled:
return
cache_file = os.path.join(self._cache_dir, key)
with open(cache_file, "wb") as handle:
pickle.dump(value, handle)
# A list that saves the state to disk with each change.
# The list recreates its last state in the constructor, so it is immune to container restarts.
class CacheableList(Cacheable[List[T]], Iterable[T], JsonSerializable):
def __init__(self, cache_dir: str, name: str) -> None:
super().__init__(cache_dir)
self._name: str = name
self._list: List[T] = super()._cache_read(name, [])
def append(self, value: T) -> None:
self._list.append(value)
self._cache_write(self._name, self._list)
def remove(self, value: T) -> None:
self._list.remove(value)
self._cache_write(self._name, self._list)
def __len__(self) -> int:
return len(self._list)
def __iter__(self) -> Iterator[T]:
return iter(self._list)
def to_json(self) -> List[T]:
return self._list
# This class has easily accessible information about the name of the VM on which this script is running.
# The "Azure Instance Metadata Service (Linux)" is helpful.
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service?tabs=linux
class ThisHostnames(JsonSerializable):
@retry()
def __init__(self, api_metadata_instance: str, socket_timeout_seconds: int) -> None:
super().__init__()
request = urllib.request.Request(api_metadata_instance)
request.add_header("Metadata", "true")
with urllib.request.urlopen(request, timeout=socket_timeout_seconds) as response:
if response.status // 100 != 2:
raise ValueError("Instance metadata API responded with code {response.status}.")
metadata_instance = json.loads(response.read())
self.hostname: str = socket.gethostname()
self.compute_name: str = metadata_instance["compute"]["name"]
self.node_name: str = self._compute_name_to_node_name(self.compute_name)
# example
# input: aks-default-36328368-vmss_18
# output: aks-default-36328368-vmss00000i
@staticmethod
def _compute_name_to_node_name(compute_name: str) -> str:
name_prefix, vm_index_base10 = compute_name.split("_")
vm_index_base36 = ThisHostnames._b36_encode(int(vm_index_base10))
return name_prefix + vm_index_base36.rjust(6, "0")
@staticmethod
def _b36_encode(num: int) -> str:
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
result = ""
while not result or num > 0:
num, i = divmod(num, 36)
result = digits[i] + result
return result
def to_json(self) -> Dict[str, Any]:
return {
"hostname": self.hostname,
"compute_name": self.compute_name,
"node_name": self.node_name,
}
# An object-oriented representation of a single "scheduled event".
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events#query-for-events
class ScheduledEvent(JsonSerializable):
NOT_A_DATE = datetime.fromtimestamp(0, timezone.utc)
NOT_A_DATE_ISO = datetime.fromtimestamp(0, timezone.utc).isoformat()
def __init__(self, event: Dict[str, Any]) -> None:
super().__init__()
self._raw: Dict[str, Any] = event
self.eventid: str = event["EventId"]
self.eventtype: str = event["EventType"]
self.resourcetype: str = event["ResourceType"]
self.resources: List[str] = event["Resources"]
self.eventstatus: str = event["EventStatus"]
self.notbefore: datetime = self._parsedate_to_datetime(event["NotBefore"])
self.description: str = event["Description"]
self.eventsource: str = event["EventSource"]
self.durationinseconds: int = event["DurationInSeconds"]
self.seo2startedat: datetime = datetime.fromisoformat(event.get("Seo2StartedAt", ScheduledEvent.NOT_A_DATE_ISO))
def mark_as_started(self) -> None:
self.seo2startedat = datetime.now(timezone.utc)
def seconds_since_started(self) -> float:
return abs((datetime.now(timezone.utc) - self.seo2startedat).total_seconds())
@staticmethod
# https://bugs.python.org/issue30681
def _parsedate_to_datetime(value) -> datetime:
try:
result = email.utils.parsedate_to_datetime(value)
if result is None:
raise ValueError
return result
except (TypeError, ValueError):
return ScheduledEvent.NOT_A_DATE
def to_json(self) -> Dict[str, Any]:
return {
**self._raw,
"Seo2StartedAt": self.seo2startedat.isoformat()
}
# An object-oriented representation of a whole "scheduled events" response.
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events#query-for-events
class ScheduledEvents(Iterable[ScheduledEvent], JsonSerializable):
def __init__(self, events: Dict[str, Any]) -> None:
super().__init__()
self._raw: Dict[str, Any] = events
self.document_incarnation: int = events["DocumentIncarnation"]
self.events: List[ScheduledEvent] = list(map(ScheduledEvent, events.get("Events", [])))
def __len__(self) -> int:
return len(self.events)
def __iter__(self) -> Iterator[ScheduledEvent]:
return iter(self.events)
def to_json(self) -> Dict[str, Any]:
return self._raw
# A tool to perform operations on the "scheduled events" API.
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events#query-for-events
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/scheduled-events#start-an-event
class ScheduledEventsManager:
def __init__(self,
api_metadata_scheduledevents: str,
socket_timeout_seconds: int,
delay_before_program_close_seconds: int) -> None:
super().__init__()
self.api_metadata_scheduledevents: str = api_metadata_scheduledevents
self.socket_timeout: int = socket_timeout_seconds
self.delay_before_program_close_seconds: int = delay_before_program_close_seconds
@retry()
def query_for_events(self) -> ScheduledEvents:
request = urllib.request.Request(self.api_metadata_scheduledevents)
request.add_header("Metadata", "true")
with urllib.request.urlopen(request, timeout=self.socket_timeout) as response:
if response.status // 100 != 2:
raise ValueError("ScheduledEvents API responded with code {response.status}.")
metadata_scheduledevents = json.loads(response.read())
return ScheduledEvents(metadata_scheduledevents)
@retry()
def start_an_event(self, event: ScheduledEvent) -> str:
# A node redeploy can follow immediately, sleep as at the end of the program.
# Give some time to external monitoring to collect logs.
time.sleep(self.delay_before_program_close_seconds)
data = {"StartRequests": [{"EventId": event.eventid}]}
data_bytes = json.dumps(data).encode("utf-8")
request = urllib.request.Request(self.api_metadata_scheduledevents, data=data_bytes)
request.add_header("Metadata", "true")
with urllib.request.urlopen(request, timeout=self.socket_timeout) as response:
if response.status // 100 != 2:
raise ValueError("ScheduledEvents API responded with code {response.status}.")
return response.read().decode('utf-8', 'ignore')
# Subprocess related tools, external dependencies somehow have to be running.
class SubprocessUtils:
def __init__(self):
raise AssertionError
@staticmethod
def subprocess_run_async(cmd: List[str], **_print_kwargs) -> 'subprocess.Popen[str]':
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True, bufsize=1)
print_(f"Running a command {cmd}.", subprocess=proc.pid, **_print_kwargs)
proc_output_reader = threading.Thread(target=SubprocessUtils._subprocess_stdout_reader,
args=(proc,),
kwargs=_print_kwargs)
proc_output_reader.start()
return proc
@staticmethod
# https://stackoverflow.com/a/18423003
def _subprocess_stdout_reader(proc: 'subprocess.Popen[str]', **print_kwargs) -> None:
if proc.stdout is not None:
# pyre-ignore[16]: https://github.com/facebook/pyre-check/issues/221
for message in proc.stdout:
print_(message, subprocess=proc.pid, **print_kwargs)
@staticmethod
def subprocess_run_sync(cmd: List[str], **_print_kwargs) -> 'subprocess.CompletedProcess[str]':
print_(f"Running a command {cmd}.", subprocess=-1, **_print_kwargs)
return subprocess.run(cmd, text=True, capture_output=True, check=False)
# An object-oriented representation of a "kubectl version" response.
class KubectlVersion(JsonSerializable):
def __init__(self, client_version: str, server_version: str, stderr: str) -> None:
super().__init__()
self.client_version: str = client_version
self.server_version: str = server_version
self.stderr: str = stderr
def to_json(self) -> Dict[str, Any]:
return {
"client_version": self.client_version,
"server_version": self.server_version,
"stderr": self.stderr
}
# Tool to perform operations with the "kubectl" tool.
class KubectlManager:
def __init__(self,
cache_dir: str,
kubectl_drain_options: List[str],
this_hostnames: ThisHostnames) -> None:
super().__init__()
self.kubectl_cordon_cache: CacheableList[ScheduledEvent] = CacheableList(cache_dir, "kubectl_cordon_cache")
self.kubectl_drain_options: List[str] = kubectl_drain_options
self.this_hostnames: ThisHostnames = this_hostnames
@retry((subprocess.SubprocessError, ValueError))
def kubectl_cordon(self, event: ScheduledEvent) -> None:
proc = SubprocessUtils.subprocess_run_async(
["kubectl", "cordon", self.this_hostnames.node_name],
eventid=event.eventid)
exit_code = proc.wait()
if exit_code != 0:
raise ValueError("kubectl cordon operation failed with code {exit_code}.")
self.kubectl_cordon_cache.append(event)
@retry((subprocess.SubprocessError, ValueError))
def kubectl_drain(self, event: ScheduledEvent) -> None:
proc = SubprocessUtils.subprocess_run_async(
["kubectl", "drain", self.this_hostnames.node_name, *self.kubectl_drain_options],
eventid=event.eventid)
exit_code = proc.wait()
if exit_code != 0:
raise ValueError("kubectl drain operation failed with code {exit_code}.")
@retry((subprocess.SubprocessError, ValueError))
def kubectl_uncordon(self, event: ScheduledEvent) -> None:
proc = SubprocessUtils.subprocess_run_async(
["kubectl", "uncordon", self.this_hostnames.node_name],
eventid=event.eventid)
exit_code = proc.wait()
if exit_code != 0:
raise ValueError("kubectl uncordon operation failed with code {exit_code}.")
self.kubectl_cordon_cache.remove(event)
@staticmethod
@retry((subprocess.SubprocessError, ValueError))
def kubectl_version() -> KubectlVersion:
kubectl_version_proc = SubprocessUtils.subprocess_run_sync(["kubectl", "version", "-o", "json"])
if kubectl_version_proc.returncode != 0:
exit_code = kubectl_version_proc.returncode
stderr = kubectl_version_proc.stderr
raise ValueError(f"kubectl version operation failed with code {exit_code}, stderr='{stderr}'.")
try:
versions = json.loads(kubectl_version_proc.stdout)
except json.JSONDecodeError:
print_("Failed to parse 'kubectl version' response.")
versions = {}
client_version = versions.get("clientVersion", {}).get("gitVersion", None)
server_version = versions.get("serverVersion", {}).get("gitVersion", None)
stderr = kubectl_version_proc.stderr.rstrip()
return KubectlVersion(client_version, server_version, stderr)
# An object-oriented representation of a "processing-rules" config key element.
class ProcessingRule(JsonSerializable):
def __init__(self, rule: Dict[str, Any]) -> None:
super().__init__()
self.rule_type: str = rule["rule-type"]
self.event_type_is: List[str] = rule["event-type-is"]
self.and_duration_in_seconds_less_equal_to: Optional[int] = rule.get("and-duration-in-seconds-less-equal-to")
self.and_duration_in_seconds_greater_equal_to: Optional[int] = \
rule.get("and-duration-in-seconds-greater-equal-to")
self.and_compute_name_matches: Optional[str] = rule.get("and-compute-name-matches")
self.and_compute_name_not_matches: Optional[str] = rule.get("and-compute-name-not-matches")
self.and_node_name_matches: Optional[str] = rule.get("and-node-name-matches")
self.and_node_name_not_matches: Optional[str] = rule.get("and-node-name-not-matches")
def to_json(self) -> Dict[str, Any]:
return {
"rule_type": self.rule_type,
"event_type_is": self.event_type_is,
"and_duration_in_seconds_less_equal_to": self.and_duration_in_seconds_less_equal_to,
"and_duration_in_seconds_greater_equal_to": self.and_duration_in_seconds_greater_equal_to,
"and_compute_name_matches": self.and_compute_name_matches,
"and_compute_name_not_matches": self.and_compute_name_not_matches,
"and_node_name_matches": self.and_node_name_matches,
"and_node_name_not_matches": self.and_node_name_not_matches,
}
# A tool that looks at all the rules in List[ProcessingRule]
# and gives a final answer as to whether the event should be handled.
class ProcessingRuleProcessor:
def __init__(self,
processing_rules: List[ProcessingRule],
this_hostnames: ThisHostnames) -> None:
super().__init__()
self.processing_rules: List[ProcessingRule] = processing_rules
self.this_hostnames: ThisHostnames = this_hostnames
def all_considered_should_handle(self, event: ScheduledEvent) -> bool:
for processing_rule in self.processing_rules:
if self._handle_event_if(processing_rule, event):
return True
if self._ignore_event_if(processing_rule, event):
return False
return True
def _handle_event_if(self, processing_rule: ProcessingRule, event: ScheduledEvent) -> bool:
return processing_rule.rule_type == "handle-event-if" and self._process_event_if(processing_rule, event)
def _ignore_event_if(self, processing_rule: ProcessingRule, event: ScheduledEvent) -> bool:
return processing_rule.rule_type == "ignore-event-if" and self._process_event_if(processing_rule, event)
def _process_event_if(self, processing_rule: ProcessingRule, event: ScheduledEvent) -> bool:
res = event.eventtype in processing_rule.event_type_is
res &= (processing_rule.and_duration_in_seconds_less_equal_to is None
or processing_rule.and_duration_in_seconds_less_equal_to >= event.durationinseconds > 0)
res &= (processing_rule.and_duration_in_seconds_greater_equal_to is None
or event.durationinseconds >= processing_rule.and_duration_in_seconds_greater_equal_to
or event.durationinseconds < 0)
res &= (processing_rule.and_compute_name_matches is None
or fnmatch(self.this_hostnames.compute_name, processing_rule.and_compute_name_matches))
res &= (processing_rule.and_compute_name_not_matches is None
or not fnmatch(self.this_hostnames.compute_name, processing_rule.and_compute_name_not_matches))
res &= (processing_rule.and_node_name_matches is None
or fnmatch(self.this_hostnames.node_name, processing_rule.and_node_name_matches))
res &= (processing_rule.and_node_name_not_matches is None
or not fnmatch(self.this_hostnames.node_name, processing_rule.and_node_name_not_matches))
return res
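# Illustrative config sketch (added; values are hypothetical): rules are checked top to bottom
# and the first matching handle-event-if / ignore-event-if rule decides, defaulting to "handle".
# A processing-rules entry such as
#   {"rule-type": "ignore-event-if", "event-type-is": ["Freeze"],
#    "and-duration-in-seconds-less-equal-to": 10}
# would skip short freeze events on this node while everything else is still handled.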
# Configuration - what can be set with parameters to the script.
class Config(JsonSerializable):
def __init__(self, config_file: str, cache_dir: str) -> None:
super().__init__()
config_data = {}
if os.path.isfile(config_file):
with open(config_file, encoding='utf-8') as f:
config_data = json.load(f)
self.config_file: str = config_file
self.cache_dir: str = cache_dir
self.api_metadata_instance: str = \
config_data.get("api-metadata-instance",
"http://169.254.169.254/metadata/instance?api-version=2021-02-01")
self.api_metadata_scheduledevents: str = \
config_data.get("api-metadata-scheduledevents",
"http://169.254.169.254/metadata/scheduledevents?api-version=2020-07-01")
self.polling_frequency_seconds: int = config_data.get("polling-frequency-seconds", 60)
self.socket_timeout_seconds: int = config_data.get("socket-timeout-seconds", 10)
self.processing_rules: List[ProcessingRule] = \
list(map(ProcessingRule, config_data.get("processing-rules", [])))
self.kubectl_drain_options: List[str] = config_data.get("kubectl-drain-options", [])
self.delay_before_uncordon_seconds: int = config_data.get("delay-before-uncordon-seconds", 120)
self.delay_before_program_close_seconds: int = config_data.get("delay-before-program-close-seconds", 5)
def to_json(self) -> Dict[str, Any]:
return {
"config_file": self.config_file,
"cache_dir": self.cache_dir,
"api_metadata_instance": self.api_metadata_instance,
"api_metadata_scheduledevents": self.api_metadata_scheduledevents,
"polling_frequency_seconds": self.polling_frequency_seconds,
"socket_timeout_seconds": self.socket_timeout_seconds,
"processing_rules": self.processing_rules,
"kubectl_drain_options": self.kubectl_drain_options,
"delay_before_program_close_seconds": self.delay_before_program_close_seconds,
}
# The operator is here. Everything else is unnecessary.
class Seoperator2:
def __init__(self,
cache_dir: str,
processing_rules_processor: ProcessingRuleProcessor,
this_hostnames: ThisHostnames,
scheduled_events_manager: ScheduledEventsManager,
kubectl_manager: KubectlManager,
delay_before_uncordon_seconds: int) -> None:
super().__init__()
self.processing_rules_processor: ProcessingRuleProcessor = processing_rules_processor
self.this_hostnames: ThisHostnames = this_hostnames
self.scheduled_events_manager: ScheduledEventsManager = scheduled_events_manager
self.kubectl_manager: KubectlManager = kubectl_manager
self.delay_before_uncordon_seconds: int = delay_before_uncordon_seconds
self.already_processed_events: CacheableList[str] = CacheableList(cache_dir, "already_processed_events")
def handle_scheduled_events(self, events: ScheduledEvents) -> None:
# If an event is finished, it will no longer be reported by the scheduledevents API.
# uncordon nodes affected by scheduled events in the past.
for cached_event in self.kubectl_manager.kubectl_cordon_cache:
if not any(cached_event.eventid == event.eventid for event in events) \
and cached_event.seconds_since_started() > self.delay_before_uncordon_seconds:
print_(f"Found an event from the past {cached_event.eventid}.", eventid=cached_event.eventid)
print_(f"Handling the past event {cached_event.eventid}.", eventid=cached_event.eventid)
self.kubectl_manager.kubectl_uncordon(cached_event)
print_(f"Handled the past event {cached_event.eventid}.", eventid=cached_event.eventid)
if len(events) == 0:
return
print_(f"The current list of planned events includes {len(events)} events.", events=events)
events2: Iterator[ScheduledEvent] = iter(events)
events2 = filter(lambda event: event.eventid not in self.already_processed_events, events2)
events2 = filter(lambda event: self.this_hostnames.compute_name in event.resources, events2)
events2 = filter(lambda event: event.resourcetype == "VirtualMachine", events2)
events2 = filter(lambda event: event.eventstatus == "Scheduled", events2)
events2 = filter(self.processing_rules_processor.all_considered_should_handle, events2)
for event in events2:
print_(f"Found an event {event.eventid} ({event.eventtype}).", eventid=event.eventid)
print_(f"Handling the event {event.eventid}.", eventid=event.eventtype)
self.handle_scheduled_event(event)
print_(f"Handled the event {event.eventid}.", eventid=event.eventid)
self.already_processed_events.append(event.eventid)
def handle_scheduled_event(self, event: ScheduledEvent) -> None:
self.kubectl_manager.kubectl_cordon(event)
self.kubectl_manager.kubectl_drain(event)
if len(event.resources) > 1:
# The event confirmation applies to all resources in the event,
# it is not possible to start the event for one machine.
# As we do not want to start the event prematurely, we are not starting the event at all.
# We could also choose the leader and (on that node) do handling for each resource separately,
# but then we will increase the processing time and/or memory usage (both of them are in short supply).
# Azure will start the event when the time to handle it has passed.
# Azure will then delete the node when it notices that it is not being used.
print_(f"Not starting the scheduled event {event.eventid},"
f"the event affects multiple nodes.", eventid=event.eventid)
self.kubectl_manager.kubectl_cordon_cache.remove(event)
else:
print_(f"Starting the scheduled event {event.eventid}.", eventid=event.eventid)
response = self.scheduled_events_manager.start_an_event(event)
print_(f"Started the scheduled event {event.eventid}, response='{response}'.", eventid=event.eventid)
# Manager that takes care of graceful shutdown.
class LifeManager:
def __init__(self, delay_before_program_close_seconds: int) -> None:
super().__init__()
self.delay_before_program_close_seconds: int = delay_before_program_close_seconds
self.exit_threading_event: threading.Event = threading.Event()
self.signal_counters: Dict[str, int] = collections.defaultdict(int)
for some_signal in [signal.SIGTERM, signal.SIGINT, signal.SIGHUP]:
signal.signal(some_signal, self.signals_handler)
def signals_handler(self, signal_number: int, current_stack_frame: Any):
signal_name = signal.Signals(signal_number).name
self.signal_counters[signal_name] += 1
if self.signal_counters[signal_name] == 1:
print_(f"Program has been interrupted by the {signal_name}, graceful shutdown in progress.")
# Give some time to external monitoring to collect logs.
time.sleep(self.delay_before_program_close_seconds)
self.exit_threading_event.set()
else:
print_(f"Program has been interrupted by the {signal_name} again, forced shutdown in progress.")
sys.exit(-1)
def main():
try:
print_("The operator started working.")
config_file_path = sys.argv[1] if len(sys.argv) > 1 else "/no/custom/config"
cache_dir = sys.argv[2] if len(sys.argv) > 2 else "/do/not/store"
# Disable automatic proxy server detection.
# https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service?tabs=linux#proxies
# https://docs.python.org/3.9/howto/urllib2.html#proxies
proxy_support = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy_support)
urllib.request.install_opener(opener)
# Initializing helper classes.
config = Config(config_file_path,
cache_dir)
life_manager = LifeManager(config.delay_before_program_close_seconds)
this_hostnames = ThisHostnames(config.api_metadata_instance,
config.socket_timeout_seconds)
scheduled_events_manager = ScheduledEventsManager(config.api_metadata_scheduledevents,
config.socket_timeout_seconds,
config.delay_before_program_close_seconds)
kubectl_manager = KubectlManager(config.cache_dir,
config.kubectl_drain_options,
this_hostnames)
processing_rules_processor = ProcessingRuleProcessor(config.processing_rules,
this_hostnames)
operator = Seoperator2(config.cache_dir,
processing_rules_processor,
this_hostnames,
scheduled_events_manager,
kubectl_manager,
config.delay_before_uncordon_seconds)
# Checking the environment.
app_version = datetime.fromtimestamp(os.path.getmtime(__file__)).astimezone().isoformat()
sys_version = sys.version_info
kubectl_version = KubectlManager.kubectl_version()
print_("The operator has been initialized.",
app_version=app_version,
sys_version=sys_version,
kubectl_version=kubectl_version,
config=config,
this_hostnames=this_hostnames,
already_processed_events=operator.already_processed_events,
kubectl_cordon_cache=kubectl_manager.kubectl_cordon_cache,
exit_threading_event_is_set=life_manager.exit_threading_event.is_set())
while True:
# print_("The operator is still working.")
data = scheduled_events_manager.query_for_events()
operator.handle_scheduled_events(data)
if life_manager.exit_threading_event.wait(config.polling_frequency_seconds):
break
except BaseException as e:
traceback_formatted = str(traceback.format_exc())
print_(f"There was a fatal error in my main loop, {e.__class__.__name__}.", traceback=traceback_formatted)
sys.exit(1)
if __name__ == "__main__":
main()
``` |
{
"source": "180888/fyle-interview-de-intern",
"score": 3
} |
#### File: 180888/fyle-interview-de-intern/extract.py
```python
import logging
import json
import os
logger = logging.getLogger(__name__)
'''
Given a directory with receipt file and OCR output, this function should extract the amount
Parameters:
dirpath (str): directory path containing receipt and ocr output
Returns:
float: returns the extracted amount
'''
# dirpath='data/'
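# Note (assumption, added for clarity): ocr.json is expected to be Textract-style output,
# e.g. {"Blocks": [{"Text": "TOTAL"}, {"Text": "$12.50"}, ...]}; only the "Text" values
# of each block are collected below.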
def extract_amount(dirpath: str) -> float:
logger.info('extract_amount called for dir %s', dirpath)
# Opening JSON file
full_path=os.path.join(dirpath,'ocr.json')
f = open(full_path,mode='r')
asd=[]
data = json.load(f)
# print(type(data))
for a in reversed(data['Blocks']):
for key,val in a.items():
if(key=='Text'):
# print(key,val)
asd.append(val)
# print(asd)
def isnum(amount1):
bad_chars = ['$', 'Rs', ':', "!"]
# print(amount1)
# initializing test string
test_string = amount1
# printing original string
# print ("Original String : " + test_string)
# using replace() to
# remove bad_chars
for i in bad_chars :
test_string = test_string.replace(i, '')
print ("Resultant list is : " + (test_string))
try:
amount1=float(test_string)
variab=isinstance(amount1,float)
except ValueError:
return -1
if variab:
return amount1
else:
return -1
amount=0
x=0
itr1=0
for itr in asd:
if(itr.upper()=='CREDIT' or itr.upper()=="PAYMENT" or itr.upper()=="TOTAL VALUE" or itr.upper()=="TOTAL" or itr.upper()=="VALUE" or itr.upper()=="AMOUNT" or itr.upper()=="DEBIT" or itr.upper()=="TOTAL AMOUNT"):
var2=isnum(itr1)
if(var2!=-1):
var1=isinstance(var2,float)
# print(var1)
# print("$$$$")
if var1:
x=1
continue
if x==1:
amount=itr1
break
itr1=itr
amount=isnum(amount)
f.close()
# print(amount)
logger.info('extract_amount called for dir %s', dirpath)
return amount
``` |
{
"source": "180D-FW-2020/Team8",
"score": 3
} |
#### File: imu/imu_integration/sub.py
```python
import paho.mqtt.client as mqtt
def on_connect(client, userdata, flags, rc):
print("Connection returned result: " + str(rc))
client.subscribe("ece180d/MEAT/imu", qos=1)
def on_disconnect(client, userdata, flags, rc):
if rc != 0:
print('Unexpected Disconnect')
else:
print('Expected Disconnect')
def on_message(client, userdata, message):
print('Received message: "' + str(message.payload) + '" on topic "' + message.topic + '" with QoS ' + str(message.qos))
client = mqtt.Client()
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect_async('mqtt.eclipseprojects.io')
client.loop_start()
while True:
pass
client.loop_stop()
client.disconnect()
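# Illustrative counterpart sketch (added; not part of the original file): the subscriber above
# can be exercised with a matching publisher, e.g.
#   pub = mqtt.Client()
#   pub.connect('mqtt.eclipseprojects.io')
#   pub.publish('ece180d/MEAT/imu', 'hello', qos=1)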
```
#### File: src/gui/stringparser.py
```python
def parse_string(phrase:str, marker:str, validNames):
delim = marker
words = phrase.split()
hitlist = []
indlist = []
for index,word in enumerate(words):
try:
if (word == delim) and (words[index+1] in validNames.keys()):
hitlist.append(validNames[words[index+1]])
indlist.insert(0, index)
indlist.insert(0, index+1)
except IndexError:
continue
for ind in indlist:
del words[ind]
return (' '.join(words), hitlist)
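# Illustrative example (added; the validNames dict is hypothetical):
#   parse_string('send emote at alice now', 'at', {'alice': 1})
# returns ('send emote now', [1]) - the marker word and the matched name are removed
# from the phrase and the mapped value is collected into the hit list.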
```
#### File: imgproc/video_embedder/vid_embed_gui.py
```python
import sys
import time
from os import path
import cv2 as cv
import numpy as np
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import threading
# @desc
# test class for opencv video feed with ar video overlay
# todo: video lag, overlay video not playing
class VideoOverlayCarousel(QObject):
image_data = pyqtSignal(np.ndarray)
def __init__(self, parent=None):
super().__init__(parent)
self.cap = cv.VideoCapture(0)
self.model = cv.imread('model.png')
self.overlay = [cv.VideoCapture('video.mp4'), cv.VideoCapture('video2.mp4')]
self.counter = 0
self.trigger = QBasicTimer()
# @desc
# event trigger for an instantaneous event; use when an event overload is not desired
# or when an event trigger must cause a custom signal to emit
def start(self):
self.trigger.start(0, self)
# run video embedder
def run(self, videoimage, cameraimage):
i = 1 # index for vid carousel
height, width, c = self.model.shape
orb = cv.ORB_create(nfeatures=1000)
kp1, des1 = orb.detectAndCompute(self.model, None)
targetDetected = False
videoimage = cv.resize(videoimage, (width, height)) # resize image to fit model image dimensions
augmentedimage = cameraimage.copy()
kp2, des2 = orb.detectAndCompute(cameraimage, None)
if targetDetected == False:
self.overlay[i].set(cv.CAP_PROP_POS_FRAMES, 0)
self.counter = 0
else:
if self.counter >= self.overlay[i].get(cv.CAP_PROP_FRAME_COUNT):
self.overlay[i].set(cv.CAP_PROP_POS_FRAMES, 0)
self.counter = 0
self.overlay[i].set(cv.CAP_PROP_POS_FRAMES, self.counter)
retval, videoimage = self.overlay[i].read()
videoimage = cv.resize(videoimage, (width, height)) # resize video to fit model image dimensions
matches = self.generateMatches(des1, des2)
#print(len(matches))
if len(matches) > 230:
targetDetected = True
augmentedimage = self.embed(cameraimage, videoimage, kp1, kp2, matches, augmentedimage, height, width)
self.counter += 1
return augmentedimage
# generates matches between two image descriptors
# @param
# des1: model image descriptors
# des2: camera image descriptors
def generateMatches(self, des1, des2):
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)
matches = sorted(matches, key=lambda x: x.distance)
return matches
# does video embed in camera image
# @param
# cameraimage: cap.read() camera image
# videoimage: video overlay read() image
# kp1: model keypoints
# kp2: camera image keypoints
# matches: BFMatcher between model and camera image descriptors
# augmentedimage: camera feed with video overlay
# height: height of mask
# width: width of mask
def embed(self, cameraimage, videoimage, kp1, kp2, matches, augmentedimage, height, width):
srcpts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dstpts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
matrix, mask = cv.findHomography(srcpts, dstpts, cv.RANSAC, 5)
points = np.float32([[0,0], [0,height], [width,height], [width,0]]).reshape(-1,1,2)
dst = cv.perspectiveTransform(points, matrix)
warpedimage = cv.warpPerspective(videoimage, matrix, (cameraimage.shape[1], cameraimage.shape[0])) # changes video frame shape into model surface
newmask = np.zeros((cameraimage.shape[0], cameraimage.shape[1]), np.uint8)
cv.fillPoly(newmask, [np.int32(dst)], (255, 255, 255))
invertedmask = cv.bitwise_not(newmask)
augmentedimage = cv.bitwise_and(augmentedimage, augmentedimage, mask=invertedmask)
augmentedimage = cv.bitwise_or(warpedimage, augmentedimage)
return augmentedimage
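# Sketch of the pipeline above (added for clarity; names are hypothetical): findHomography
# estimates the 3x3 matrix mapping model keypoints to camera keypoints, perspectiveTransform
# projects the model corners into the camera frame, and the bitwise mask/or pastes the warped
# video only inside that quadrilateral, e.g.
#   matrix, _ = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
#   warped = cv.warpPerspective(frame, matrix, (cam_w, cam_h))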
# @desc
# handles timer events triggered by this class
def timerEvent(self, event):
if(event.timerId() != self.trigger.timerId()):
print("timer shit fucked up")
return
retval, videoimage = self.overlay[1].read()
retval, cameraimage = self.cap.read()
augmentedimage = self.run(videoimage, cameraimage)
self.image_data.emit(augmentedimage)
```
#### File: Team8/test/test_animations.py
```python
import base_test
from src.gui.animations import *
import numpy as np
import cv2 as cv
import data.resources
DFORMAT = QImage.Format_RGB888 # color space
class TestVideo(QObject):
image_data = pyqtSignal(np.ndarray)
def __init__(self, parent=None):
super().__init__(parent)
self.cap = cv.VideoCapture(0)
self.trigger = QBasicTimer()
def start(self):
self.trigger.start(0, self)
def timerEvent(self, event):
if(event.timerId() != self.trigger.timerId()):
print("error: timer ID mismatch")
return
read, frame = self.cap.read()
if read:
self.image_data.emit(frame)
class DisplayWidget(QWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.image = QImage()
def _array2qimage(self, image : np.ndarray):
h, w, c = image.shape
bpl = w*3 # bytes per line
image = QImage(image.data, w, h, bpl, DFORMAT)
image = image.rgbSwapped()
return image
def setImage(self, image):
self.image = self._array2qimage(image)
self.setFixedSize(self.image.size())
self.update()
def paintEvent(self, event):
p = QPainter(self)
p.drawImage(0, 0, self.image)
self.image = QImage()
class MainWidget(QWidget):
spawn = pyqtSignal(list)
def __init__(self, parent=None):
super().__init__(parent)
self.setFocusPolicy(Qt.StrongFocus)
# widgets and objects
self.display = DisplayWidget()
self.video = TestVideo()
self.emotebox = EmoteWidget()
self.frame_timer = QTimer(self)
self.layout = QGridLayout()
self.setMainLayout()
# signals and slots
self.spawn.connect(lambda x: self.emotebox.spawn_emotes(x))
self.video.image_data.connect(lambda x: self.display.setImage(x))
self.video.start()
def setMainLayout(self):
self.layout.addWidget(self.display, 0, 0, alignment=Qt.AlignCenter)
self.layout.addWidget(self.emotebox, 0, 0, alignment=Qt.AlignCenter)
self.setLayout(self.layout)
def keyPressEvent(self, event):
super(MainWidget, self).keyPressEvent(event)
if event.key() == Qt.Key_Q:
self.spawn.emit([1,1,1])
if event.key() == Qt.Key_W:
self.spawn.emit([2])
if event.key() == Qt.Key_E:
self.spawn.emit([3])
if event.key() == Qt.Key_R:
self.spawn.emit([4])
if event.key() == Qt.Key_T:
self.spawn.emit([5])
class testUI:
def __init__(self):
self.qapp = QApplication(sys.argv)
self.window = QMainWindow()
# self.main_widget = EmoteWidget()
self.main_widget = MainWidget()
self.window.setCentralWidget(self.main_widget)
self.window.show()
sys.exit(self.qapp.exec_())
if __name__ == '__main__':
test = testUI()
``` |
{
"source": "18101224/final",
"score": 3
} |
#### File: final/sourcecode/final.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.formula.api as smf
from scipy import stats
def data(filename):
f= open(filename,'r')
data=[]
i=1
for line in f.readlines():
newline=line.split()
newline[0]=i # replace the date with the elapsed-day index
i+=1
for index in range(1,len(newline)):
if newline[index]=='-':
newline[index]=0 # values over 1000 are written as "1,000", so convert them to integers below
elif len(newline[index]) >=5 and len(newline[index]) <= 7:
a,b=map(int,newline[index].split(','))
newline[index]=a*1000+b
elif len(newline[index]) >=8 :
a,b,c=map(int,newline[index].split(','))
newline[index]=a*1000000+b*1000+c
else:
newline[index]=int(newline[index])
data.append(newline)
return data
def cof(x): # coefficient of the trig function used for the self line fitting
return ((2*np.pi*x)/(((((x)/201)//1))*200))
def pcrline(x,liss): #학습된 곡선 함수
return 2*liss[0]*x +8*liss[1]*(np.sin(cof(x))*((((np.abs(x)/200))//1)**2))+liss[2]
pcr=np.array(data('pcr.txt'))#pcr 날짜 전체확진자 국내발생 해외유입 사망자
vaccine=np.array(data('vaccine.txt'))#vaccine 날짜 전체1차 전체2차 전체3차 AZ1 AZ2 F1 F2 Y1 M1 M2 F3 M3 Y3
lmp='local maximum point'
#전체 확진자 추이
x=np.linspace(pcr.T[0].min(),pcr.T[0].max(),len(pcr.T[0])) #날짜 x데이터 생성
y=pcr.T[1] #확진자수 데이터 생성
plt.plot(x,y,'.')
plt.plot(x[220],y[220],'o','red',label=lmp) #지역 max point 표시
plt.plot(x[340],y[340],'o','red',label=lmp)
plt.plot(x[569],y[569],'o','red',label=lmp)
plt.title("positive scatter for whole range")
plt.xlabel('day after debut of covid19')
plt.ylabel('number of positive')
plt.legend()
plt.show()
# Death-rate trend over the whole period
plt.title("death rate for whole range")
plt.ylabel('death rate')
plt.xlabel('day after debut of covid19')
plt.plot(x,pcr.T[4]/pcr.T[1],'-') # deaths / positives for each elapsed day => death rate
plt.show()
# Positive-case trend before the With Corona policy
plt.plot(x[:len(x)-50],y[:len(y)-50],'.')
plt.plot(x[220],y[220],'o','red')
plt.plot(x[340],y[340],'o','red') #120
plt.plot(x[569],y[569],'o','red') #220
plt.xlabel('day after debut of covid and before WithCorona')
plt.ylabel('number of positive')
plt.title("positive rate before WithCorona")
plt.show()
###############################################################################################################
# Line fitting of the positive counts
x1=x[200:len(x)-50] # from the day the outbreak took off until just before With Corona
y2=y[200:len(y)-50]
poly_fit=np.polyfit(x1,y2,7) # fit the positive counts with a 7th-degree polynomial
poly_1d=np.poly1d(poly_fit)
xs=np.linspace(x1.min(),x1.max()+50) # date variable xs, extended past the start of With Corona
ys=poly_1d(xs) # predicted positives for xs
plt.plot(xs,np.abs(ys),'k-',label='line fitting by polyfit(opensource)') # fitted curve (black)
##############################################################################################################
### Building the positive-count curve by hand ###
y1=np.zeros(x1.shape) ### y1 column that will hold the trig-term values
i=0
for node in x1:
y1[i]=np.abs((np.cos(cof(node)))*((((np.abs(node)/200))//1)))# store the trig value for each date variable
i+=1
coefs=np.vstack((x1,y1,np.ones(x1.shape))) # assemble as matrix A
coefs=np.matmul(np.linalg.pinv(coefs.T),y[:len(x1)]) # multiply the pseudo-inverse of A by the actual positive counts to obtain the coefficient tuple
plt.plot(x1,pcrline(x1,coefs),'r-',label='line fitting by myself') # the curve fitted by hand
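# Note (added for clarity): the vstack/pinv lines above solve the least-squares problem
# A @ [a, b, c] ~= y[:len(x1)], where A has columns [day, trig term, 1], via the Moore-Penrose
# pseudo-inverse; np.linalg.lstsq(A, y[:len(x1)], rcond=None)[0] would return the same coefficients.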
###############################################################################################################
### Fitting a straight line for the period after With Corona ###
x3=x[len(x)-50:] # keep only the data after With Corona
y3=y[len(y)-50:] # keep only the data after With Corona
plt.plot(x3,y3,'y.',alpha=1,label='actual pcr positive after WithCorona') # actual positives after With Corona
ploy_fit1=np.polyfit(x3,y3,1)# fit the post-With-Corona positive counts
poly_1d=np.poly1d(ploy_fit1)# fit the post-With-Corona positive counts
xs1=np.linspace(x3.min(), x3.max())
ys1=poly_1d(xs1)# predicted positives from the fitted function
plt.plot(xs1,ys1,'y-',label='line fitting after withCorona') # draw the line fitted for the period after With Corona
###############################################################################################################
### Actual positive-case distribution ###
plt.plot(x[:len(x)-50],y[:len(y)-50],'b.',alpha=0.3,label='actual pcr positive')# actual positive-case distribution
###############################################################################################################
### Set the plot metadata (labels, annotations)
plt.plot(x[220],y[220],'o','red')
plt.plot(x[340],y[340],'o','red')
plt.plot(x[569],y[569],'o','red')
plt.annotate('local max',xy=(x[220],y[220]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.annotate('local max',xy=(x[340],y[340]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.annotate('local max',xy=(x[569],y[569]),arrowprops=dict(facecolor='black',shrink=0.0005,alpha=0.7))
plt.xlabel('day after debut of covid and before WithCorona')
plt.ylabel('number of positive')
plt.title('predicting pcr positives with line fitting')
plt.legend()
plt.show()
###############################################################################################################
# The function used for the self line fitting
plt.title('the sine wave used for self line fitting')
x4=np.linspace(200,100000)
plt.plot(x4,100*np.cos(cof(x4)),'k-')
plt.legend()
plt.show()
###############################################################################################################
# Relationship between vaccination and the death rate
pop=51821669 # total population of South Korea, used for the vaccination rate
###############################################################################################################
# Data preparation
data={
'positive':pcr[403:len(pcr)-2,1],
'deathRate':((pcr[403:len(pcr)-2,4]/pcr[403:len(pcr)-2,1])*10e6)//1,
'vaccine':vaccine[:,1]/pop,
'AZ':vaccine[:,5]/pop,
'Fizer':vaccine[:,7]/pop, # positives, overall vaccination status and per-vaccine uptake used in the data
'Y':vaccine[:,8]/pop,
'Modern':vaccine[:,10]/pop
}
data=pd.Series(data)
###############################################################################################################
###############################################################################################################
# 3D visualization of the relation between vaccination and the positive-count distribution
ax=plt.axes(projection='3d')
ax.set_xlabel('days')
ax.set_zlabel('number of positive')
ax.set_ylabel('vaccination of the day')
ax.view_init(10,-10) # change the viewing angle of the 3D plot
ax.scatter3D(np.linspace(403,len(pcr)-1,len(data['positive'])),data['vaccine'],data['positive'])# (day, vaccination rate, positives)
plt.show() # number of positives corresponding to each vaccination rate
###############################################################################################################
# Vaccination rate by elapsed day
plt.plot(np.linspace(0,len(data['vaccine'])-1,len(data['vaccine']-1)),data['vaccine'])
plt.xlabel('days')
plt.ylabel('vaccination')
plt.show()
###############################################################################################################
# 2D visualization of positives versus vaccination rate
plt.plot(data['vaccine'],data['positive'])
plt.xlabel('vaccination')
plt.ylabel('number of positive')
plt.show()
###############################################################################################################
# Relationship between vaccination rate and death rate
x=np.array(data['vaccine']) # vaccination-rate variable
y=np.array(data['deathRate']) # death-rate variable
plt.scatter(x,y, label='actual deathRate') # actual death-rate distribution over vaccination rate
###############################################################################################################
# Line fitting of death rate against vaccination rate
poly_fit=np.polyfit(x,y,4) # line fitting with numpy polyfit
poly_1d=np.poly1d(poly_fit) # line fitting with numpy polyfit
xs=np.linspace(x.min(),x.max()) # line fitting with numpy polyfit
ys=poly_1d(xs) # line fitting with numpy polyfit
plt.plot(xs,ys,color='red',label='line fitting by poly_fit')# draw the fitted curve
###############################################################################################################
# Regression of death rate on overall vaccination rate
formular = 'deathRate ~ vaccine' # model the death rate with the vaccine variable
result=smf.ols(formular,data).fit() # linear regression with statsmodels
print('Regression of death rate on vaccination rate','\n',result.summary())
xs1=np.linspace(xs.min(),xs.max())
ys1=6.23e-05*xs1+5.296e+4 # y values built from the fitted coefficients
plt.plot(xs1,ys1,'green',label='regression by overall vaccine') # a first-order model does not fit well
###############################################################################################################
# Regression of death rate on per-vaccine uptake
formula2='deathRate~AZ+Fizer+Y+Modern' # one variable per vaccine
result2=smf.ols(formula2,data).fit() # fit
print('Regression of death rate per vaccine','\n',result2.summary())
def deathForVaccine(A,F,Y,M): # function built from the fitted coefficients
return 7.365e+04 + 3.923e+04*A + 9.816e+04*F - 2.431e+06*Y + 3.382e+05*M
plt.plot(x,deathForVaccine(data['AZ'],data['Fizer'],data['Y'],data['Modern']),'k-',label='regression by sum of each vaccine')
# Plot the fitted regression curve
I=np.eye(4)
for i in range(4):
now=x[-1]*I[i]
print(deathForVaccine(now[0],now[1],now[2],now[3]))
# Death rate if vaccination had used only a single vaccine type
# AZ: about 1 percent, Fizer: about 1.5 percent, Y: a negative value, Modern: about 3 percent
###############################################################################################################
# Apply the correction
for item in ('AZ','Fizer','Modern'):
for i in range(91,len(data[item])):
data[item][i]=data[item][i]-data[item][i-90] # antibody-waning correction: subtract people vaccinated more than 90 days earlier
fomular3='deathRate~AZ+Fizer+Modern' # use only the AZ, Pfizer and Moderna variables
result3=smf.ols(fomular3,data).fit() # fit
print("Result after correction\n",result3.summary()) # print the result
def deathForVaccine2(A,F,M): # function built from the fitted coefficients
return 2.492e+04*A-5.058e+05*F+1.696e+06*M+7.144e+04
plt.plot(x,deathForVaccine2(data['AZ'], data['Fizer'], data['Modern']),'y-',label='result after correction')
# Plot the result of the corrected fit
I2=np.eye(3)
for i in range(3):
vaccines=('AZ','Fizer','Modern')
now=x[-1]*I2[i]
print(vaccines[i],deathForVaccine2(*I2[i]))
# After the corrected fit: predicted death rate if 90 percent coverage were reached with a single vaccine
# AZ: under 1 percent, Fizer: a negative value, Modern: under 2 percent (about 1.7)
###############################################################################################################
# Plot metadata setup
plt.legend()
plt.xlabel('vaccination rate')
plt.ylabel('death rate')
plt.title('deathRate with vaccination rate')
plt.show()
``` |
{
"source": "18121861183/ztag",
"score": 3
} |
#### File: ztag/ztag/sqlite_util.py
```python
import json
import sqlite3
# sql_str = '''CREATE TABLE "colasoft_ftp" (
# "id" integer NOT NULL PRIMARY KEY AUTOINCREMENT,
# "name" text,
# "fingerprint" text,
# "subprotocol" text,
# "protocol" text,
# "port" integer,
# "local_manufacturer" text,
# "local_product" text,
# "local_version" text,
# "local_revision" text,
# "local_description" text,
# "global_manufacturer" text,
# "global_product" text,
# "global_version" text,
# "global_revision" text,
# "global_os" text,
# "global_os_version" text,
# "global_os_description" text,
# "global_device_type" text,
# "global_description" text,
# "create_time" integer
# )'''
class Database:
def __init__(self):
self.conn = sqlite3.connect('fingerprint.db')
self.cursor = self.conn.cursor()
def query_rules(self, table_name):
return self.cursor.execute("select * from "+table_name)
if __name__ == '__main__':
conn = sqlite3.connect('fingerprint.db')
cursor = conn.cursor()
result = cursor.execute("select * from colasoft_ftp")
column = ['local_manufacturer',
'local_product',
'local_version',
'local_revision',
'local_description',
'global_manufacturer',
'global_product',
'global_version',
'global_revision',
'global_os',
'global_os_version',
'global_os_description',
'global_device_type',
'global_description']
index = range(6, 20)
try:
rule_object = json.loads('{"regex": "121212121"}')
if rule_object.keys().__contains__('regex'):
print(rule_object['regex'])
except ValueError:
pass
def database():
# Presumably intended to return a Database instance; the original returned database() and recursed forever.
return Database()
``` |
{
"source": "18137131021/chenguilin_april_exam",
"score": 2
} |
#### File: home_application/unins/ESB.py
```python
__pet__ = '''
┏┓ ┏┓
┏━┛┻━━━━━┛┻━━┓
┃ ☃ ┃
┃ ┳━┛ ┗━┳ ┃
┃ ┻ ┃
┗━━┓ ┏━━┛
┃ ┗━━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗━┓━┓┏━━┳┓┏━━┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
'''
from conf.default import APP_ID, APP_TOKEN
from blueking.component.shortcuts import get_client_by_request, get_client_by_user
from django.http.request import HttpRequest
from blueking.component.client import ComponentClient
class ESBApi(object):
"""
需要传request参数
"""
def __init__(self, param):
if isinstance(param, HttpRequest):
self.__client = get_client_by_request(param) # get a client that carries the user's login state
self.username = param.user.username
else:
self.__client = get_client_by_user(param) # get a client for the given username
self.username = param
self.__param = {
"bk_app_code": APP_ID,
"bk_app_secret": APP_TOKEN,
'bk_username': self.username
}
def search_business(self, page=None):
if page is None:
page = {"start": 0, "limit": 200}
param = self.__param
fields = ["bk_biz_id", "bk_biz_name"]
param['fields'] = fields
param['page'] = page
result = self.__client.cc.search_business(param)
return result
# 查询集群
def search_set(self, page=None, bk_biz_id=None):
if page is None:
page = {"start": 0, "limit": 200}
param = self.__param
fields = ["bk_set_id", "bk_set_name"]
param['fields'] = fields
param['page'] = page
param['bk_biz_id'] = bk_biz_id
result = self.__client.cc.search_set(param)
return result
# Query hosts by business
# def search_host(self, biz_id, page=None):
# try:
# if page is None:
# page = {"start": 0, "limit": 20}
# param = self.__param
# param['bk_biz_id'] = biz_id
# param['page'] = page
# param['condition'] = []
# result = self.__client.cc.search_host(param)
# except Exception, e:
# result = {'message': e}
#
# return result
# Query host IPs by business and set
def search_host(self, biz_id, set_id, page=None):
try:
if page is None:
page = {"start": 0, "limit": 20}
condition = [
{
"bk_obj_id": "biz",
"fields": [],
"condition": [
{
"field": "bk_biz_id",
"operator": "$eq",
"value": int(biz_id)
}
]
},
{
"bk_obj_id": "set",
"fields": [],
"condition": [
{
"field": "bk_set_id",
"operator": "$eq",
"value": int(set_id)
}
]
},
]
param = self.__param
param['bk_biz_id'] = biz_id
param['page'] = page
param['condition'] = condition
result = self.__client.cc.search_host(param)
except Exception as e:
result = {'message': e}
return result
# Query host IPs by business, set and module
# def search_host(self, bk_biz_id, set_id, module_id, page=None):
# try:
# if page is None:
# page = {"start": 0, "limit": 20}
# condition = [
# {
# "bk_obj_id": "biz",
# "fields": [],
# "condition": [
# {
# "field": "bk_biz_id",
# "operator": "$eq",
# "value": int(bk_biz_id)
# }
# ]
# },
# {
# "bk_obj_id": "set",
# "fields": [],
# "condition": [
# {
# "field": "bk_set_id",
# "operator": "$eq",
# "value": int(set_id)
# }
# ]
# },
# {
# "bk_obj_id": "module",
# "fields": [],
# "condition": [
# {
# "field": "bk_module_id",
# "operator": "$eq",
# "value": int(module_id)
# }
# ]
# }
# ]
# param = self.__param
# param['page'] = page
# param['condition'] = condition
# result = self.__client.cc.search_host(param)
# except Exception, e:
# result = {'message': e}
# return result
# Rarely used API: query the businesses of the current user
def get_app_by_user(self):
"""
Get the list of businesses under the current user
:return:
"""
try:
param = self.__param
self.__client.set_bk_api_ver('')
result = self.__client.cc.get_app_by_user(param)
except Exception as e:
result = {'message': e}
return result
# Execute a script as the current user
def fast_execute_script(self, bk_biz_id=None, script_id=None, script_content=None, ip_list=None, script_param=None,
account=None, script_type=None):
'''
Fast-execute a script
:param bk_biz_id:
:param script_id:
:param script_content:
:param ip_list:
:param script_param:
:param account:
:return:
'''
param = self.__param
if account is None:
account = 'root'
if script_id:
param['script_id'] = script_id
param["bk_biz_id"] = bk_biz_id
param["script_content"] = script_content
param["account"] = account
param["ip_list"] = ip_list
param["script_type"] = script_type
if script_param is not None:
param['script_param'] = script_param
result = self.__client.job.fast_execute_script(param)
# print result, '--------快速执行脚本结果'
return result
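# Illustrative call sketch (added; values are hypothetical): ip_list follows the JOB API format,
# e.g. fast_execute_script(bk_biz_id=2, script_content=base64_script, script_type=1,
#                          ip_list=[{"bk_cloud_id": 0, "ip": "10.0.0.1"}])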
class ESBComponentApi(object):
'''
ESB client that does not require a request parameter
'''
def __init__(self):
self.__param = {
"app_code": APP_ID,
"app_secret": APP_TOKEN,
'bk_username': "admin"
}
common_args = {'username': 'admin'}
self.client = ComponentClient(
# APP_ID 应用ID
app_code=APP_ID,
# APP_TOKEN 应用TOKEN
app_secret=APP_TOKEN,
common_args=common_args
)
def fast_execute_script(self, bk_biz_id=None, script_id=None, script_content=None, ip_list=None, script_param=None,
account=None, script_type=None):
'''
Fast-execute a script
:param bk_biz_id:
:param script_id:
:param script_content:
:param ip_list:
:param script_param:
:param account:
:return:
'''
param = self.__param
if account is None:
account = 'root'
if script_id:
param['script_id'] = script_id
param["bk_biz_id"] = bk_biz_id
param["script_content"] = script_content
param["account"] = account
param["ip_list"] = ip_list
param["script_type"] = script_type
if script_param is not None:
param['script_param'] = script_param
result = self.client.job.fast_execute_script(param)
# print result, '--------快速执行脚本结果'
return result
def get_job_instance_log(self, bk_biz_id=None, job_instance_id=None):
param = {
"bk_app_code": self.__param['app_code'],
"bk_app_secret": self.__param['app_secret'],
"bk_biz_id": bk_biz_id,
"job_instance_id": job_instance_id
}
result = self.client.job.get_job_instance_log(param)
return result
def the_biz_name(self, bk_biz_id=None):
param = {
"bk_app_code": self.__param['app_code'],
"bk_app_secret": self.__param['app_secret'],
"bk_username": "admin",
"fields": [
"bk_biz_id",
"bk_biz_name"
],
"condition": {
"bk_biz_id": bk_biz_id
}
}
result = self.client.cc.search_business(param)
return result
def test_data(self):
pass
# import aix_base64_script
# ip_cloud = 0
# ip = '192.168.50.235'
# script_data = aix_base64_script.stop_script_data
# bk_biz_id = 28
# args = {
# "bk_app_code": APP_ID,
# "bk_app_secret": APP_TOKEN,
# "bk_username": "admin",
# "bk_biz_id": int(bk_biz_id),
# "script_content": script_data,
# "script_type": 1,
# "account": "root",
# "ip_list": [
# {
# "bk_cloud_id": int(ip_cloud),
# "ip": ip
# }
# ],
# }
# resturl = self.client.job.fast_execute_script(**args)
#
# return resturl
``` |
{
"source": "18150167970/example-mining-incremental-learning",
"score": 2
} |
#### File: example-mining-incremental-learning/utils/draw.py
```python
from __future__ import absolute_import
import os
import torch as t
import cv2
import numpy as np
from utils import array_tool as at
from utils.config import opt
from tqdm import tqdm
import six
def draw(dataloader, faster_rcnn, test_num=100):
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, id_) in enumerate(dataloader):
sizes = [sizes[0][0].item(), sizes[1][0].item()]
pred_bboxes_, pred_labels_, pred_scores_, _feature = faster_rcnn.predict(imgs, [
sizes])
img_file = opt.voc_data_dir + '/JPEGImages/' + str(id_[0]) + '.jpg'
image = cv2.imread(img_file)
# Convert to numpy format
bboxs = at.tonumpy(pred_bboxes_[0])
name = at.tonumpy(pred_labels_[0]).reshape(-1)
score = at.tonumpy(pred_scores_[0]).reshape(-1)
# Save the test-set predictions each round; ideally add an epoch check and save only every 10 epochs, otherwise it wastes too much time
for i in range(len(name)):
xmin = int(round(float(bboxs[i, 1])))
ymin = int(round(float(bboxs[i, 0])))
xmax = int(round(float(bboxs[i, 3])))
ymax = int(round(float(bboxs[i, 2])))
if score[i] <= opt.threshold:
continue
cv2.rectangle(image, (xmin, ymin),
(xmax, ymax), (0, 0, 255), 1)
cv2.putText(image, opt.VOC_BBOX_LABEL_NAMES[name[i]], (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * image.shape[0], (0, 0, 255), 1)
cv2.putText(image, str(score[i])[0:3], (xmin + 30, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * image.shape[0], (0, 0, 255), 1)
cv2.imwrite('result/' + str(id_[0]) + '.jpg', image)
def draw_predict(pred_bboxes_, pred_labels_, pred_scores_):
pred_bboxes1 = iter(pred_bboxes_)
pred_labels1 = iter(pred_labels_)
pred_scores1 = iter(pred_scores_)
if opt.nms_type == 'soft_nms':
write_path = 'result/'
else:
write_path = 'result_nms/'
if opt.nms_use_label == True:
write_path = 'label_' + write_path
print (write_path)
f = open('/media/chenli/E/VOCdevkit/VOC2007/ImageSets/Main/test2.txt')
for pred_bbox, pred_label, pred_score in six.moves.zip(pred_bboxes1, pred_labels1, pred_scores1):
id_ = f.readline()[:-1]
# print id_
img_file = '/media/chenli/E/VOCdevkit/VOC2007/JPEGImages/' + \
str(id_) + '.jpg'
image = cv2.imread(img_file)
# Convert to numpy format
bboxs = at.tonumpy(pred_bbox)
name = at.tonumpy(pred_label).reshape(-1)
score = at.tonumpy(pred_score).reshape(-1)
# Save the test-set predictions each round; ideally add an epoch check and save only every 10 epochs, otherwise it wastes too much time
for i in range(len(name)):
xmin = int(round(float(bboxs[i, 1])))
ymin = int(round(float(bboxs[i, 0])))
xmax = int(round(float(bboxs[i, 3])))
ymax = int(round(float(bboxs[i, 2])))
if score[i] <= opt.threshold:
continue
cv2.rectangle(image, (xmin, ymin),
(xmax, ymax), (0, 0, 255), 1)
cv2.putText(image, opt.VOC_BBOX_LABEL_NAMES[name[i]], (xmin, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * image.shape[0], (0, 0, 255), 1)
cv2.putText(image, str(score[i])[0:3], (xmin + 30, ymin - 10),
cv2.FONT_HERSHEY_SIMPLEX, 1e-3 * image.shape[0], (0, 0, 255), 1)
cv2.imwrite(write_path + str(id_) + '.jpg', image)
``` |
{
"source": "18150167970/knowledge-distillation",
"score": 2
} |
#### File: knowledge-distillation/utils/utils.py
```python
def flip_bbox(bbox, size, x_flip=False):
# Horizontal flip
H, W = size
if x_flip:
x_max = W - bbox[:, 1]
x_min = W - bbox[:, 3]
bbox[:, 1] = x_min
bbox[:, 3] = x_max
return bbox
def py_cpu_nms(gt_bboxes_, gt_labels_, teacher_pred_bboxes_, teacher_pred_labels_, teacher_pred_scores_, thresh=0.7):
# Use NMS-style IoU checks: if a predicted (teacher) bbox overlaps a ground-truth box above the threshold, drop it
x1 = gt_bboxes_[:, 0]
y1 = gt_bboxes_[:, 1]
x2 = gt_bboxes_[:, 2]
y2 = gt_bboxes_[:, 3]
x1_pre = teacher_pred_bboxes_[:, 0]
y1_pre = teacher_pred_bboxes_[:, 1]
x2_pre = teacher_pred_bboxes_[:, 2]
y2_pre = teacher_pred_bboxes_[:, 3]
# scores = socres # bbox打分
areas = (x2_pre - x1_pre + 1) * (y2_pre - y1_pre + 1)
areas2 = (x2 - x1 + 1) * (y2 - y1 + 1)
# keep holds the boxes that are finally retained
keep = []
inds = []
# print pred_scores[0]
for i in range(len(teacher_pred_bboxes_)):
if teacher_pred_scores_[i] <= 0.5:
flag = 1
continue
flag = 0
# Compute the overlap area between teacher box i and all ground-truth boxes
xx1 = np.maximum(x1_pre[i], x1)
yy1 = np.maximum(y1_pre[i], y1)
xx2 = np.minimum(x2_pre[i], x2)
yy2 = np.minimum(y2_pre[i], y2)
# print xx1.shape
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas2 - inter)
for j in range(len(gt_bboxes_)):
if ovr[j] >= thresh:
flag = 1
# print 1
break
if flag == 0:
inds.append(i)
gt_scores_ = t.ones(gt_labels_.shape)
teacher_pred_bboxes = teacher_pred_bboxes_[inds]
teacher_pred_labels = teacher_pred_labels_[inds]
teacher_pred_scores = teacher_pred_scores_[inds]
teacher_pred_bboxes = teacher_pred_bboxes.astype(
np.float32)
teacher_pred_labels = teacher_pred_labels.astype(np.int32)
teacher_pred_scores = teacher_pred_scores.astype(
np.float32)
teacher_pred_bboxes_ = at.totensor(teacher_pred_bboxes)
teacher_pred_labels_ = at.totensor(teacher_pred_labels)
teacher_pred_scores_ = at.totensor(teacher_pred_scores)
gt_bboxes_ = gt_bboxes_.cuda()
gt_labels_ = gt_labels_.cuda()
gt_scores_ = gt_scores_.cuda()
gt_bboxes_ = t.cat((gt_bboxes_, teacher_pred_bboxes_))
gt_labels_ = t.cat((gt_labels_, teacher_pred_labels_))
gt_scores_ = t.cat((gt_scores_, teacher_pred_scores_))
return gt_bboxes_, gt_labels_, gt_scores_
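# --- Illustrative usage sketch (added; the call site below is an assumption) ---
# py_cpu_nms merges confident teacher predictions into the ground truth for distillation:
# teacher boxes with score <= 0.5, or whose IoU with any ground-truth box reaches `thresh`,
# are dropped; the survivors are appended to the ground-truth boxes/labels, and the real
# ground-truth entries receive a constant score of 1. A training step might call it as:
#   gt_bboxes_, gt_labels_, gt_scores_ = py_cpu_nms(
#       gt_bboxes_, gt_labels_, teacher_bboxes_, teacher_labels_, teacher_scores_, thresh=0.7)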
def eval(dataloader, faster_rcnn, test_num=10000):
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
for ii, (imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_, id_) in tqdm(enumerate(dataloader)):
if len(gt_bboxes_) == 0:
continue
sizes = [sizes[0][0].item(), sizes[1][0].item()]
pred_bboxes_, pred_labels_, pred_scores_, _ = faster_rcnn.predict(imgs, [
sizes])
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
if ii == test_num:
break
# this evaluation function returns the AP and mAP values; the pred_bboxes passed in is a
# list of 3-D arrays (i.e. every list element is a 3-D array, keeping the batch in mind),
# and the other arguments follow the same convention
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
return result
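# Illustrative usage (added): `result` is the dict returned by eval_detection_voc, so a
# caller typically reads the mean average precision from it, e.g.
#   eval_result = eval(test_dataloader, trainer.faster_rcnn, test_num=1000)
#   print(eval_result['map'])
# (the dataloader/trainer names above are assumptions, not taken from this file)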
``` |
{
"source": "18165299273/PythonStudyBase",
"score": 3
} |
#### File: DemoTest/Until/StringControl.py
```python
class StringControl():
def __init__(self, str):
self.str = str
# capitalize the first letter of each word
def FirstLetterUpper(self):
return self.str.title()
def LetterUpper(self):
return self.str.upper()
def LetterLower(self):
return self.str.lower()
```
#### File: DemoTest/Until/UntilBase.py
```python
from typing import Type
# str is a string
# str.isalnum()  all characters are letters or digits
# str.isalpha()  all characters are letters
# str.isdigit()  all characters are digits
# str.islower()  all characters are lowercase
# str.isupper()  all characters are uppercase
# str.istitle()  every word starts with an uppercase letter, like a title
# str.isspace()  all characters are whitespace (space, \t, \n, \r)
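# Quick examples of the predicates above (added for clarity):
#   'abc123'.isalnum() -> True      'abc'.isalpha() -> True      '123'.isdigit() -> True
#   'abc'.islower()    -> True      'ABC'.isupper() -> True
#   'Hello World'.istitle() -> True ' \t\n'.isspace() -> True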
# check whether the value is a number
def isNumber(param):
try:
float(param)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(param)
return True
except (TypeError, ValueError):
pass
return False
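# Illustrative check of isNumber (added; not part of the original file):
if __name__ == '__main__':
    assert isNumber('3.14') is True   # float() succeeds
    assert isNumber('½') is True      # float() fails, but unicodedata.numeric() knows the value 0.5
    assert isNumber('abc') is False   # both conversions fail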
``` |
{
"source": "18191171661/AutoEncoder-tensorflow1.01",
"score": 3
} |
#### File: Autoencoder/CLASS/AGN_Autoencoder.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import numpy as np
import tensorflow as tf
import sklearn.preprocessing as prep
from tensorflow.examples.tutorials.mnist import input_data
from matplotlib import pyplot as plt
from scipy.misc import imsave
def xavier_init(fan_in, fan_out, constant = 1):
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval = low,
maxval = high,
dtype = tf.float32,
seed = 33)
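# Worked example (added for clarity): for fan_in=784 and fan_out=200 the Xavier bound is
# sqrt(6 / (784 + 200)) ≈ 0.078, so the weights are drawn uniformly from roughly
# (-0.078, 0.078), which keeps the activation variance roughly constant across layers.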
class AdditiveGaussianNoiseAutoencoder(object):
def __init__(self, n_input, n_hidden, transfer_function = tf.nn.softplus,
optimizer = tf.train.AdamOptimizer(), scale = 0.1):
self.n_input = n_input
self.n_hidden = n_hidden
self.transfer = transfer_function
self.scale = tf.placeholder(tf.float32)
self.training_scale = scale
network_weights = self._initial_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.hidden = self.transfer(tf.add(tf.matmul(self.x + scale * tf.random_normal((n_input,)),
self.weights['w1']), self.weights['b1']))
self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
# cost
self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initial_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype = tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype = tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype = tf.float32))
return all_weights
def once_fit(self, X):
cost, _ = self.sess.run((self.cost, self.optimizer), feed_dict = {self.x: X, self.scale: self.training_scale})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X, self.scale: self.training_scale})
def transform(self, X):
return self.sess.run(self.hidden, feed_dict = {self.x: X, self.scale: self.training_scale})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size = self.weights['b1'])
return self.sess.run(self.reconstruction, feed_dict = {self.hidden: hidden})
def reconstraion(self, X):
return self.sess.run(self.reconstruction, feed_dict = {self.x: X, self.scale: self.training_scale})
def get_weights(self):
return self.sess.run(self.weights['w1'])
def get_biases(self):
return self.sess.run(self.weights['b1'])
def AGN_main():
print('starting...')
print('loading data,please wait a moment...')
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def standard_scale(X_train, X_test):
preprocess = prep.StandardScaler().fit(X_train)
X_train = preprocess.transform(X_train)
X_test = preprocess.transform(X_test)
return X_train, X_test
def get_batch_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index : start_index + batch_size]
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
nb_epoch = 20
batch_size = 128
display_time = 1
autoencoder = AdditiveGaussianNoiseAutoencoder(n_input = 784,
n_hidden = 200,
transfer_function = tf.nn.relu,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001),
scale = 0.01)
if os.path.exists('result'):
os.rename('result', 'result_before')
os.mkdir('result')
path = os.getcwd()
print(path)
paths = path + str('\\result')
print(paths)
os.chdir(paths)
print(os.getcwd())
else:
os.mkdir('result')
path = os.getcwd()
print(path)
paths = path + str('\\result')
print(paths)
os.chdir(paths)
print(os.getcwd())
for epoch in range(nb_epoch):
total_batch = int(n_samples / batch_size)
avg_cost = 0.
for i in range(total_batch):
batch_data = get_batch_data(X_train, batch_size)
cost = autoencoder.once_fit(batch_data)
avg_cost += cost / n_samples * batch_size
weights = autoencoder.get_weights()
bias = autoencoder.get_biases()
reconstract = autoencoder.reconstraion(batch_data)
picture = np.reshape(reconstract, [128, 28, 28, -1])
#print(picture.shape)
result = picture[1:2]
#print(result.shape)
data = np.reshape(result, [28, 28])
imsave('%d.jpg' %(i), data)
if epoch % display_time == 0:
print('Epoch:', '%04d' %(epoch + 1), 'cost =','{:.9f}'.format(avg_cost))
print('Total cost is: ' + str(autoencoder.calc_total_cost(X_test)))
print('weights is:', weights)
print('bias is:', bias)
print(reconstract.shape)
print('reconstruct result is:', reconstract)
plt.plot(data)
plt.show()
print('ending...')
if __name__ == '__main__':
AGN_main()
```
#### File: Autoencoder/CLASS/VAE_Autoencoder.py
```python
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import tensorflow as tf
import numpy as np
import sklearn.preprocessing as prep
from matplotlib import pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
from scipy.misc import imsave
def xavier_init(fan_in, fan_out, constant = 1):
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval = low,
maxval = high,
dtype = tf.float32,
seed = 33)
class VariationalAutoencoder(object):
def __init__(self, n_input, n_hidden, optimizer = tf.train.AdamOptimizer()):
self.n_input = n_input
self.n_hidden = n_hidden
network_weights = self._initialize_weights()
self.weights = network_weights
# model
self.x = tf.placeholder(tf.float32, [None, self.n_input])
self.z_mean = tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1'])
self.z_log_sigma_sq = tf.add(tf.matmul(self.x, self.weights['log_sigma_w1']), self.weights['log_sigma_b1'])
# sample from gaussian distribution
eps = tf.random_normal(tf.stack([tf.shape(self.x)[0], self.n_hidden]), 0, 1, dtype = tf.float32)
self.z = tf.add(self.z_mean, tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))
self.reconstruction = tf.add(tf.matmul(self.z, self.weights['w2']), self.weights['b2'])
# cost
reconstr_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
latent_loss = -0.5 * tf.reduce_sum(1 + self.z_log_sigma_sq
- tf.square(self.z_mean)
- tf.exp(self.z_log_sigma_sq), 1)
self.cost = tf.reduce_mean(reconstr_loss + latent_loss)
self.optimizer = optimizer.minimize(self.cost)
init = tf.global_variables_initializer()
self.sess = tf.Session()
self.sess.run(init)
def _initialize_weights(self):
all_weights = dict()
all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['log_sigma_w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['log_sigma_b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
return all_weights
def partial_fit(self, X):
cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
return cost
def calc_total_cost(self, X):
return self.sess.run(self.cost, feed_dict = {self.x: X})
def transform(self, X):
return self.sess.run(self.z_mean, feed_dict={self.x: X})
def generate(self, hidden = None):
if hidden is None:
hidden = np.random.normal(size=self.weights["b1"])
return self.sess.run(self.reconstruction, feed_dict={self.z_mean: hidden})
def reconstruct(self, X):
return self.sess.run(self.reconstruction, feed_dict={self.x: X})
def getWeights(self):
return self.sess.run(self.weights['w1'])
def getBiases(self):
return self.sess.run(self.weights['b1'])
def VAE_main():
print('starting...')
print('loading data, please wait a moment...')
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
def min_max_scale(X_train, X_test):
preprocessor = prep.MinMaxScaler().fit(X_train)
X_train = preprocessor.transform(X_train)
X_test = preprocessor.transform(X_test)
return X_train, X_test
def get_random_block_from_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index:(start_index + batch_size)]
X_train, X_test = min_max_scale(mnist.train.images, mnist.test.images)
n_samples = int(mnist.train.num_examples)
training_epochs = 20
batch_size = 128
display_step = 1
autoencoder = VariationalAutoencoder(n_input = 784,
n_hidden = 200,
optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))
if os.path.exists('result_VAE'):
os.rename('result_VAE', 'result_VAE_before')
os.mkdir('result_VAE')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
else:
os.mkdir('result_VAE')
path = os.getcwd()
print(path)
paths = path + str('\\result_VAE')
print(paths)
os.chdir(paths)
print(os.getcwd())
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(n_samples / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_xs = get_random_block_from_data(X_train, batch_size)
# Fit training using batch data
cost = autoencoder.partial_fit(batch_xs)
# Compute average loss
avg_cost += cost / n_samples * batch_size
weights = autoencoder.getWeights()
bias = autoencoder.getBiases()
#data.append(batch_data)
reconstract = autoencoder.reconstruct(batch_xs)
picture = np.reshape(reconstract, [128, 28, 28, -1])
#print(picture.shape)
result = picture[1:2]
#print(result.shape)
data = np.reshape(result, [28, 28])
imsave('%d.jpg' %(i), data)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
print ("Total cost: " + str(autoencoder.calc_total_cost(X_test)))
print('weights is:', weights)
print('bias is:', bias)
print(reconstract.shape)
print('reconstruct result is:', reconstract)
plt.plot(data)
plt.show()
print('ending...')
if __name__ == '__main__':
VAE_main()
```
#### File: Autoencoder/test/VAE_Autocoder.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import sys
import time
import numpy as np
import tensorflow as tf
import sklearn.preprocessing as prep
from CLASS.CLASS_VAE import *
from tensorflow.examples.tutorials.mnist import input_data
from matplotlib import pyplot as plt
from scipy.misc import imsave
flags = tf.app.flags
flags.DEFINE_integer('nb_epochs', 2, 'the numbers of the epoch')
flags.DEFINE_integer('batch_size', 128, 'the size of the batch')
flags.DEFINE_integer('display_time', 1, 'the time of the display')
flags.DEFINE_float('learning_rate', 0.001, 'the learning rate of the optimizer')
flags.DEFINE_string('your_path', 'D:/Data Minning/train_code/train/Autoencoder/test', 'the path of you code')
flags.DEFINE_string('optimizer', 'adag', 'choose the right optimizer')
FLAGS = flags.FLAGS
def standard_scale(X_train, X_test):
preprocess = prep.StandardScaler().fit(X_train)
X_train = preprocess.transform(X_train)
X_test = preprocess.transform(X_test)
return X_train, X_test
def get_batch_data(data, batch_size):
start_index = np.random.randint(0, len(data) - batch_size)
return data[start_index : start_index + batch_size]
def Save_Result():
if os.path.exists('result_VAE'):
os.rename('result_VAE','result_VAE_before')
os.mkdir('result_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\result_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
else:
os.mkdir('result_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\result_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
def Save_Origial():
if os.path.exists('origial_VAE'):
os.rename('origial_VAE', 'origial_before_VAE')
os.mkdir('origial_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\origial_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
else:
os.mkdir('origial_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\origial_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
def Save_transform():
if os.path.exists('transform_VAE'):
os.rename('transform_VAE', 'transform_before_VAE')
os.mkdir('transform_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\transform_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
else:
os.mkdir('transform_VAE')
path = os.getcwd()
#print(path)
paths = path + str('\\transform_VAE')
#print(paths)
os.chdir(paths)
#print(os.getcwd())
def choose_optimizer(name):
if name == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
elif name == 'adam':
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
elif name == 'adag':
optimizer = tf.train.AdagradOptimizer(FLAGS.learning_rate)
elif name == 'adad':
optimizer = tf.train.AdadeltaOptimizer(FLAGS.learning_rate)
elif name == 'rmsp':
optimizer = tf.train.RMSPropOptimizer(FLAGS.learning_rate)
else:
print('please add you optimizer...')
raise Exception('Error...')
return optimizer
def print_information(cost, epoch):
plt.xlabel('the number of each epoch')
plt.ylabel('the average cost of each epoch')
plt.title('the picture of the cost')
plt.plot(epoch, cost)
plt.show()
print('ending...')
#def main(unused_argv):
def main(_):
start_time = time.time()
print('starting...')
print('loading data, please wait a moment...')
#print('\n')
mnist = input_data.read_data_sets('MNIST_data', one_hot = True)
n_samples = int(mnist.train.num_examples)
# load the mnist datasets and print the shape
X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
print(mnist.train.images.shape)
print(X_train.shape)
print(X_test.shape)
#print('\n')
# Instantiate the autoencoder object
autoencoder = VariationalAutoencoder(n_input = 784,
n_hidden = 256,
optimizer = choose_optimizer(name = FLAGS.optimizer))
# save the original pictures
Save_Origial()
for epoch1 in range(FLAGS.nb_epochs):
total_batch = int(n_samples / FLAGS.batch_size)
for i in range(total_batch):
batch_data = get_batch_data(X_train, FLAGS.batch_size)
origial = np.reshape(batch_data, [128, 28, 28, -1])
origial_picture = origial[1:2]
origial_result = np.reshape(origial_picture, [28, 28])
imsave('%d.jpg' %(i), origial_result)
# get back to the upper path
path = FLAGS.your_path
print('start saving the origial pictures...')
print(path)
os.chdir(path)
# save the result of the hidden layer
Save_transform()
for epoch1 in range(FLAGS.nb_epochs):
total_batch = int(n_samples / FLAGS.batch_size)
for j in range(total_batch):
batch_data = get_batch_data(X_train, FLAGS.batch_size)
transforms = autoencoder.transform(batch_data)
#print(transforms.shape)
transform = np.reshape(transforms, [128, 16, 16, -1])
transform_picture = transform[1:2]
transform_result = np.reshape(transform_picture, [16, 16])
imsave('%d.jpg' %(j), transform_result)
# get back to the upper path
path = FLAGS.your_path
print('start saving the hidden layers pictures...')
print(path)
os.chdir(path)
# save the reconstraction pictures
Save_Result()
cost_value = []
epochs = []
for epoch in range(FLAGS.nb_epochs):
total_batch = int(n_samples / FLAGS.batch_size)
avg_cost = 0.
for k in range(total_batch):
batch_data = get_batch_data(X_train, FLAGS.batch_size)
cost = autoencoder.partial_fit(batch_data)
avg_cost += cost / n_samples * FLAGS.batch_size
reconstract = autoencoder.reconstruct(batch_data)
picture = np.reshape(reconstract, [128, 28, 28, -1])
result = picture[1:2]
data = np.reshape(result, [28, 28])
imsave('%d.jpg' %(k), data)
cost_value.append(avg_cost)
epochs.append(epoch)
if epoch % FLAGS.display_time == 0:
print('Epoch:', '%04d' %(epoch + 1), 'cost =','{:.9f}'.format(avg_cost))
print('Total cost is: ' + str(autoencoder.calc_total_cost(X_test)))
print_information(cost = cost_value, epoch = epochs)
print('Total time is %d s' %(time.time() - start_time))
if __name__ == '__main__':
tf.app.run()
#sys.exit(0)
#tf.app.run(main=None, argv=None)
#AGN_main()
``` |
{
"source": "18214091046/ruoli-sign-optimization",
"score": 2
} |
#### File: ruoli-sign-optimization/actions/collection.py
```python
import json
import re
import os
from requests_toolbelt import MultipartEncoder
from todayLoginService import TodayLoginService
from liteTools import LL, DT, RT, MT, ST, SuperString, TaskError, CpdailyTools
class Collection:
# initialize the information-collection class
def __init__(self, userInfo, userSession, userHost):
self.session = userSession
self.host = userHost
self.userInfo = userInfo
self.task = None
self.wid = None
self.formWid = None
self.schoolTaskWid = None
self.instanceWid = None
self.form = {}
self.historyTaskData = {}
# save a picture attachment
def savePicture(self, picSize, picNumber, ossKey):
url = f'{self.host}wec-counselor-collector-apps/stu/collector/saveAttachment'
attachName = '图片-{:0>2d}'.format(picNumber)
params = {'attachmentSize': picSize,
'ossKey': ossKey, "attachName": attachName}
self.session.post(url=url, headers={'content-type': 'application/json'}, data=json.dumps(params),
verify=False)
# query the form
def queryForm(self):
headers = self.session.headers
headers['Content-Type'] = 'application/json'
url = f'{self.host}wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList'
# first request, just to obtain the cookies (MOD_AUTH_CAS)
self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
# fetch the first page to learn how many pages there are
pageSize = 20
pageReq = {"pageNumber": 1, "pageSize": pageSize}
pageNumber = 0
totalSize = 1
# iterate page by page
while pageNumber*pageSize <= totalSize:
pageNumber += 1
pageReq["pageNumber"] = pageNumber
# fetch the **task list** data
res = self.session.post(url, headers=headers,
data=json.dumps(pageReq), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, f"获取到的第{pageNumber}页任务列表", res)
# read the **total number** of collection tasks from the **first page**
if pageNumber == 1:
# total number of collection tasks
totalSize = res['datas']['totalSize']
# raise an error if no tasks were found
if totalSize == 0:
raise TaskError("没有获取到信息收集任务", 400)
# iterate over the tasks on this page
for task in res['datas']['rows']:
if self.userInfo.get('title'):
# the task has to match the configured title
taskTitle = SuperString(self.userInfo['title'])
if not taskTitle.match(task["subject"]):
# skip tasks whose title does not match
continue
if self.userInfo.get('signLevel') == 1 and task['isHandled'] == 1:
# if only "unfilled" tasks should be submitted and this task is already filled, raise an error
raise TaskError(f"收集任务已经被填报", 100, task['subject'])
else:
# if no title matching is required, take the first task
if self.userInfo.get('signLevel') == 1 and task['isHandled'] == 1:
# when only "unfilled" tasks are wanted, skip tasks that are already filled
continue
# extract the basic information of the task
self.wid = task['wid']
self.formWid = task['formWid']
self.instanceWid = task.get('instanceWid', '')
self.taskName = task['subject']
# fetch the task details
url = f'{self.host}wec-counselor-collector-apps/stu/collector/detailCollector'
params = {"collectorWid": self.wid,
"instanceWid": self.instanceWid}
res = self.session.post(
url, headers=headers, data=json.dumps(params), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, '查询任务详情返回结果', res['datas'])
try:
self.schoolTaskWid = res['datas']['collector']['schoolTaskWid']
except TypeError:
self.schoolTaskWid = ''
LL.log(1, '循环普通任务实例wid为空')
# fetch the task form
url = f'{self.host}wec-counselor-collector-apps/stu/collector/getFormFields'
params = {"pageSize": 9999, "pageNumber": 1,
"formWid": self.formWid, "collectorWid": self.wid}
res = self.session.post(
url, headers=headers, data=json.dumps(params), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, '查询任务表单返回结果', res['datas'])
self.task = res['datas']['rows']
return
raise TaskError("没有获取到合适的信息收集任务", 400)
# get the details of a historical (already submitted) task
def getHistoryTaskInfo(self):
'''Get the details of a historical (already submitted) task'''
headers = self.session.headers
headers['Content-Type'] = 'application/json;charset=UTF-8'
# fetch the first page to learn how many pages there are
pageSize = 20
url = f'{self.host}wec-counselor-collector-apps/stu/collector/queryCollectorHistoryList'
pageReq = {"pageNumber": 1, "pageSize": pageSize}
pageNumber = 0
totalSize = 1
# iterate page by page
while pageNumber*pageSize <= totalSize:
pageNumber += 1
pageReq["pageNumber"] = pageNumber
# fetch the **task list** data
res = self.session.post(url, headers=headers,
data=json.dumps(pageReq), verify=False)
res = DT.resJsonEncode(res)
LL.log(1, f"获取到第{pageNumber}页历史信息收集数据", res)
# read the **total number** of historical collections from the **first page**
if pageNumber == 1:
# total number of historical collections
totalSize = res['datas']['totalSize']
# raise an error if no historical tasks were found
if totalSize == 0:
raise TaskError(f"没有获取到历史任务", 301, self.taskName)
# iterate over the tasks on this page
for task in res['datas']['rows']:
if task['isHandled'] == 1 and task['formWid'] == self.formWid:
# found a handled historical task matching the current one; start fetching its form
historyInstanceWid = task['instanceWid']
historyWid = task['wid']
# mimic the app's request
url = f'{self.host}wec-counselor-collector-apps/stu/collector/getUnSeenQuestion'
self.session.post(url, headers=headers, data=json.dumps(
{"wid": self.wid, "instanceWid": self.instanceWid}), verify=False)
# mimic the app's request: fetch the historical collection details
url = f'{self.host}wec-counselor-collector-apps/stu/collector/detailCollector'
self.session.post(url, headers=headers, data=json.dumps(
{"collectorWid": self.wid, "instanceWid": self.instanceWid}), verify=False)
# fetch the form
url = f'{self.host}wec-counselor-collector-apps/stu/collector/getFormFields'
formReq = {"pageNumber": 1, "pageSize": 9999, "formWid": self.formWid,
"collectorWid": historyWid, "instanceWid": historyInstanceWid}
res = self.session.post(url, headers=headers, data=json.dumps(formReq),
verify=False)
res = DT.resJsonEncode(res)
# mimic the app's request
url = f'{self.host}wec-counselor-collector-apps/stu/collector/queryNotice'
self.session.post(url, headers=headers,
data=json.dumps({}), verify=False)
# process the form
form = res['datas']['rows']
# handle every question in the form
for item in form:
# fill in the extra parameters
item['show'] = True
item['formType'] = '0'  # presumably the task type (unconfirmed)
item['sortNum'] = str(item['sort'])  # presumably the sort order
if item['fieldType'] == '2':
'''single-choice question: the unselected options have to be removed'''
item['fieldItems'] = list(
filter(lambda x: x['isSelected'], item['fieldItems']))
if item['fieldItems']:
'''if an option was selected, put its itemWid into value'''
item['value'] = item['fieldItems'][0]['itemWid']
elif item['fieldType'] == '3':
'''multiple-choice question: the unselected options also have to be removed'''
item['fieldItems'] = list(
filter(lambda x: x['isSelected'], item['fieldItems']))
if item['fieldItems']:
'''if options were selected, join their itemWids into value'''
item['value'] = ','.join(
[i['itemWid'] for i in item['fieldItems']])
elif item['fieldType'] == '4':
'''picture-upload question'''
# fill in the remaining fields
item.setdefault('http', {
'defaultOptions': {
'customConfig': {
'pageNumberKey': 'pageNumber',
'pageSizeKey': 'pageSize',
'pageDataKey': 'rows',
'pageTotalKey': 'totalSize',
'dataKey': 'datas',
'codeKey': 'code',
'messageKey': 'message'
}
}
})
item['uploadPolicyUrl'] = '/wec-counselor-collector-apps/stu/obs/getUploadPolicy'
item['saveAttachmentUrl'] = '/wec-counselor-collector-apps/stu/collector/saveAttachment'
item['previewAttachmentUrl'] = '/wec-counselor-collector-apps/stu/collector/previewAttachment'
item['downloadMediaUrl'] = '/wec-counselor-collector-apps/stu/collector/downloadMedia'
self.historyTaskData['form'] = form
return self.historyTaskData
# raise an error if no matching historical collection was found
raise TaskError(f"没有找到匹配的历史任务", 301, self.taskName)
# fill in the form
def fillForm(self):
LL.log(1, '填充表单')
if self.userInfo['getHistorySign']:
hti = self.getHistoryTaskInfo()
self.form['form'] = hti['form']
self.form["formWid"] = self.formWid
self.form["address"] = self.userInfo['address']
self.form["collectWid"] = self.wid
self.form["schoolTaskWid"] = self.schoolTaskWid
self.form["uaIsCpadaily"] = True
self.form["latitude"] = self.userInfo['lat']
self.form["longitude"] = self.userInfo['lon']
self.form['instanceWid'] = self.instanceWid
else:
# --- initialize the user's form configuration ---
task_form = []
taskLen = len(self.task)
userFormList = self.userInfo['forms']
userFormList = [u['form'] for u in userFormList]
# if the form is driven by "number" (the 1-based question index), convert it to the "isNeed" style
userFormSortIndex = {}
for u in userFormList:
# check whether every entry has a "number" key
if "number" in u:
userFormSortIndex[u['number']] = {
"title": u['title'], "value": u['value'], "isNeed": 1}
else:
break
else:
'''every entry has a "number" key'''
userFormList = []
for i in range(taskLen):
userFormList.append(
userFormSortIndex.get(i+1, {"isNeed": 0}))
# check that the user's configuration has the same length as the fetched form
if taskLen != len(userFormList):
raise TaskError(
f'用户配置了{len(userFormList)}个问题,查询到的表单有{taskLen}个问题,不匹配!', 301, self.taskName)
# --- start filling the form ---
for formItem, userForm in zip(self.task, userFormList):
formItem['formType'] = '0'
formItem['sortNum'] = str(formItem['sort'])
# decide from the user's configuration whether this question should be answered
if userForm['isNeed'] == 1:
'''the user wants to answer this question'''
# check whether the user wants the title to be verified
if self.userInfo['checkTitle'] == 1:
# the titles do not match
userFormTitle = SuperString(userForm['title'])
if not userFormTitle.match(formItem['title']):
raise TaskError(
f'\n有配置项的标题不匹配\n您的标题为:『{userFormTitle}』\n系统的标题为:『{formItem["title"]}』', 301, self.taskName)
# fill in the extra parameters (the new version added three parameters whose purpose is still unclear)
formItem['show'] = True
# start filling this form item
# text-style questions
if formItem['fieldType'] in ('1', '5', '6', '7', '11', '12'):
'''
6: time picker
7: address picker
11: phone number
12: ID-card number
'''
formItem['value'] = str(SuperString(userForm['value']))
# single-choice questions
elif formItem['fieldType'] == '2':
# wid of the chosen option
itemWid = ''
# single choice: remove the unselected options
for fieldItem in formItem['fieldItems'].copy():
userFormValue = SuperString(userForm['value'])
if not userFormValue.match(fieldItem['content']):
formItem['fieldItems'].remove(fieldItem)
else:
itemWid = fieldItem['itemWid']
if itemWid == '':
raise TaskError(
f'\n『{userForm}』配置项的选项不正确,该选项为单选,且未找到您配置的值', 301, self.taskName
)
formItem['value'] = itemWid
# multiple-choice questions
elif formItem['fieldType'] == '3':
# wids of the chosen options
itemWidArr = []
userItems = [SuperString(i)for i in userForm['value']]
# multiple choice: also remove the unselected options
for fieldItem in formItem['fieldItems'].copy():
# check whether this option is among the user's configured choices
for i in userItems:
if i.match(fieldItem['content']):
itemWidArr.append(fieldItem['itemWid'])
break
else:
formItem['fieldItems'].remove(fieldItem)
# none of the multiple-choice options matched
if len(itemWidArr) == 0:
raise TaskError(
f'『{userForm}』配置项的选项不正确,该选项为多选,且未找到您配置的值', 301, self.taskName
)
formItem['value'] = ','.join(itemWidArr)
# picture questions
elif formItem['fieldType'] in ('4', '16'):
'''
4: picture upload
16: handwriting pad
'''
# convert a sequence/string into a list
dirList = DT.formatStrList(userForm['value'])
# check the length of the list
dirListLen = len(dirList)
if dirListLen == 0:
raise TaskError(f'请在配置中填写图片路径', 301, self.taskName)
elif dirListLen > 10:
raise TaskError(
f'配置中填写的图片路径({dirListLen}个)过多', 301, self.taskName)
# add every item of the list into value
imgUrlList = []
for i, pic in enumerate(dirList, 1):
picBlob, picType = RT.choicePhoto(pic)
# upload the picture
url_getUploadPolicy = f'{self.host}wec-counselor-collector-apps/stu/obs/getUploadPolicy'
ossKey = CpdailyTools.uploadPicture(
url_getUploadPolicy, self.session, picBlob, picType)
# get the picture URL
url_previewAttachment = f'{self.host}wec-counselor-collector-apps/stu/collector/previewAttachment'
imgUrl = CpdailyTools.getPictureUrl(
url_previewAttachment, self.session, ossKey)
# append to value
imgUrlList.append(imgUrl)
# save the picture
self.savePicture(len(picBlob), i, ossKey)
formItem['value'] = ",".join(imgUrlList)
# fill in the remaining fields
formItem.setdefault('http', {
'defaultOptions': {
'customConfig': {
'pageNumberKey': 'pageNumber',
'pageSizeKey': 'pageSize',
'pageDataKey': 'rows',
'pageTotalKey': 'totalSize',
'dataKey': 'datas',
'codeKey': 'code',
'messageKey': 'message'
}
}
})
formItem['uploadPolicyUrl'] = '/wec-counselor-collector-apps/stu/obs/getUploadPolicy'
formItem['saveAttachmentUrl'] = '/wec-counselor-collector-apps/stu/collector/saveAttachment'
formItem['previewAttachmentUrl'] = '/wec-counselor-collector-apps/stu/collector/previewAttachment'
formItem['downloadMediaUrl'] = '/wec-counselor-collector-apps/stu/collector/downloadMedia'
else:
raise TaskError(
f'\n出现未知表单类型,请反馈『{formItem}』', 301, self.taskName
)
task_form.append(formItem)
else:
'''the user does not need to answer this question'''
formItem['show'] = False
formItem['value'] = ''
if 'fieldItems' in formItem:
formItem['fieldItems'].clear()
task_form.append(formItem)
self.form["form"] = task_form
self.form["formWid"] = self.formWid
self.form["address"] = self.userInfo['address']
self.form["collectWid"] = self.wid
self.form["schoolTaskWid"] = self.schoolTaskWid
self.form["uaIsCpadaily"] = True
self.form["latitude"] = self.userInfo['lat']
self.form["longitude"] = self.userInfo['lon']
self.form['instanceWid'] = self.instanceWid
def getSubmitExtension(self):
'''Generate the various extra parameters'''
extension = {
"lon": self.form['longitude'],
"lat": self.form['latitude'],
"model": self.userInfo['model'],
"appVersion": self.userInfo['appVersion'],
"systemVersion": self.userInfo['systemVersion'],
"userId": self.userInfo['username'],
"systemName": self.userInfo['systemName'],
"deviceId": self.userInfo['deviceId']
}
self.cpdailyExtension = CpdailyTools.encrypt_CpdailyExtension(
json.dumps(extension))
self.bodyString = CpdailyTools.encrypt_BodyString(
json.dumps(self.form))
self.submitData = {
"lon": self.form['longitude'],
"version": self.userInfo['signVersion'],
"calVersion": self.userInfo['calVersion'],
"deviceId": self.userInfo['deviceId'],
"userId": self.userInfo['username'],
"systemName": self.userInfo['systemName'],
"bodyString": self.bodyString,
"lat": self.form['latitude'],
"systemVersion": self.userInfo['systemVersion'],
"appVersion": self.userInfo['appVersion'],
"model": self.userInfo['model'],
}
self.submitData['sign'] = CpdailyTools.signAbstract(self.submitData)
# submit the form
def submitForm(self):
self.getSubmitExtension()
headers = {
'User-Agent': self.session.headers['User-Agent'],
'CpdailyStandAlone': '0',
'extension': '1',
'Cpdaily-Extension': self.cpdailyExtension,
'Content-Type': 'application/json; charset=utf-8',
# note: this should stay consistent with the host in the configuration file
'Host': re.findall('//(.*?)/', self.host)[0],
'Connection': 'Keep-Alive',
'Accept-Encoding': 'gzip'
}
submitUrl = f'{self.host}wec-counselor-collector-apps/stu/collector/submitForm'
LL.log(1, '提交表单', 'data', self.submitData,
'headers', headers, 'params', self.submitData)
data = self.session.post(
submitUrl, headers=headers, data=json.dumps(self.submitData), verify=False)
data = DT.resJsonEncode(data)
# verify that the submission was registered
url = f'{self.host}wec-counselor-collector-apps/stu/collector/detailCollector'
params = {"collectorWid": self.wid,
"instanceWid": self.instanceWid}
res = self.session.post(
url, headers=headers, data=json.dumps(params), verify=False)
res = DT.resJsonEncode(res)
if res['datas']['collector']['isUserSubmit'] == 1:
self.userInfo['taskStatus'].code = 101
else:
raise TaskError(f'提交表单返回『{data}』且任务状态仍是未签到', 300, self.taskName)
return '[%s]%s' % (data['message'], self.taskName)
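# --- Illustrative workflow sketch (added; the setup of userInfo/session is an assumption) ---
# A caller would normally wire the methods of this class together roughly like this:
#   collection = Collection(userInfo, userSession, userHost)
#   collection.queryForm()          # locate the collection task and load its form fields
#   collection.fillForm()           # fill answers from userInfo['forms'] or the last submission
#   msg = collection.submitForm()   # encrypt, sign and post the form, then verify the result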
``` |
{
"source": "18216279688/tools-box",
"score": 4
} |
#### File: tools-box/tools/lyl.py
```python
import pandas as pd
import xlrd
import csv
import codecs
# convert an Excel file to a CSV file
def xlsx_to_csv(path):
workbook = xlrd.open_workbook(path)
table = workbook.sheet_by_index(0)
name = input("转换之后的文件名:")
with codecs.open(f'{name}.csv', 'w', encoding='utf-8') as f:
write = csv.writer(f)
for row_num in range(table.nrows):
row_value = table.row_values(row_num)
write.writerow(row_value)
print()
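# Illustrative call (added; the path below is a made-up example):
#   xlsx_to_csv(r'C:\data\report.xlsx')   # prompts for an output name and writes <name>.csv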
# convert a CSV file to an Excel file
def csv_to_xlsx(path):
csv_1 = pd.read_csv(path, encoding='utf-8')
file_name = input("转换之后的文件名:")
sheet_name = input("第一张表格的名字:")
csv_1.to_excel(f'{file_name}.xlsx', sheet_name=f'{sheet_name}')
print("转换成功")
print()
# read an Excel file
def read_xlsx(path):
data = pd.read_excel(path)
print("获取到的数据为:")
print(data.values)
print()
# read a CSV file
def read_csv(path):
data = pd.read_csv(path)
print("获取到的数据为:")
print(data)
print()
# feature menu
def menu():
while True:
print("——————————————————功能选择————————————————")
print("1.文件转换")
print("2.文件读取")
print("3.退出程序")
choice = input("请输入您的选择:")
if choice == '1':
print("选择转换类型:")
print("1.excel转为csv")
print("2.csv转为excel")
choice1 = input("选择准换类型:")
if choice1 == '1':
path_xlsx = input(r"请输入文件地址(格式为:C:\xxx\xxx\xxx.xlsx):")
xlsx_to_csv(path_xlsx)
elif choice1 == '2':
path_csv = input(r"请输入文件地址(格式为:C:\xxx\xxx\xxx.csv):")
csv_to_xlsx(path_csv)
if choice == '2':
print("选择读取类型:")
print("1.excel")
print("2.csv")
choice2 = input("选择读取类型:")
if choice2 == '1':
path_xlsx = input(r"请输入文件地址(格式为:C:\xxx\xxx\xxx.xlsx):")
read_xlsx(path_xlsx)
elif choice2 == '2':
path_csv = input(r"请输入文件地址(格式为:C:\xxx\xxx\xxx.csv):")
read_csv(path_csv)
if choice == '3':
print("欢迎下次使用")
break
if __name__ == '__main__':
menu()
```
#### File: tools-box/tool/zhuanhuan.py
```python
from PIL import Image  # import the Image module for manipulating image files
import datetime
def image_to_txt(imgname):
# get the current time and convert it to a string
timenow = datetime.datetime.now()
timestr = timenow.strftime("%Y-%m-%d-%H-%M-%S")
# the generated txt file is named <original image name + current time string + ".txt">
namestr = "{0}-{1}.txt".format(imgname, timestr)
# open or create a txt file
txt = open(namestr, "w+")
# open the image file
print("Open Image File [{0}]".format(imgname))
try:
img = Image.open(imgname)
except:
print("Error to Open [{0}]!".format(imgname))
# check the image mode; it must be "RGB" here,
# otherwise convert it to "RGB" with convert().
if "RGB" == img.mode:
print("Size{0},Format({1}),Color({2})".format(img.size, img.format, img.mode))
else:
print("Not a RGB image file!")
img = img.convert("RGB")
print("Convert to RGB Success!")
# get the image width and height
width = img.size[0]
height = img.size[1]
bei = 0
# if the image is larger than 400*400 pixels, scale it down using the larger of width and height
if width >= height:
max = width
else:
max = height
if max >= 400:
bei = max / 400
width = int(width / bei)
height = int(height / bei)
img = img.resize((width, height))
print("Image Size too large, Resize to", img.size)
# convert the image to pure black and white
img = img.convert("1")
index = 0
print("Start Process!")
for w in range(width):  # iterate over the image width, [0, width)
# show progress
index += 1
print("#", end="")
txt.write("/")
if index >= 60:  # wrap the line after 60 characters
index = 0
print("")
for h in range(height):  # iterate over the image height, [0, height)
xiangsu = img.getpixel((w, h))  # get the pixel value at the current coordinate
#print("w=", w, "h=", h, "xiangsu=", xiangsu)
if xiangsu != 0:  # in a pure black-and-white image a pixel is either 0 or 255
txt.write("_")#非0则往txt中写入"_"表示白色
#print("w=", w, "h=", h, "xiangsu=", xiangsu)
else:
txt.write("*")#0则往txt中写入"*"表示黑色
#print("w=", w, "h=", h, "xiangsu=", xiangsu)
txt.write("/")
txt.write("\n")
# save the newly generated txt file
print("\nProcess Done!")
print("Save File As [{0}]".format(namestr))
txt.close()
print("Save Done!")
name = input("Please Input Image File Name:")
print("Start")
try:
image_to_txt(name)
except:
print("Error!")
print("Over")
``` |
{
"source": "1827529960/Recognition-of-handwritten-Chinese-characters",
"score": 3
} |
#### File: 1827529960/Recognition-of-handwritten-Chinese-characters/get_model.py
```python
import keras.backend as k
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras.layers import Conv2D, MaxPooling2D
def get_model():
k.clear_session()
# create a new model
model = Sequential()
model.add(Conv2D(32, 3, padding='same', activation='relu', input_shape=(64, 64, 3))) # 64 64 3
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, 3, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='softmax'))
model.summary()
# choose the optimizer and the loss function
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
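# Illustrative smoke test (added; the fake data below is an assumption, not from the repo):
if __name__ == '__main__':
    import numpy as np
    demo_model = get_model()
    # one fake RGB image and one fake integer label, just to confirm the shapes fit
    fake_x = np.random.rand(1, 64, 64, 3).astype('float32')
    fake_y = np.array([3])
    demo_model.train_on_batch(fake_x, fake_y)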
``` |
{
"source": "18279406017/awesome-reinforcement-learning",
"score": 2
} |
#### File: awesome-reinforcement-learning/chap01 cross entropy method/cross entropy.py
```python
import argparse
import gym
import torch
import torch.nn as nn
import numpy as np
from collections import namedtuple
from tensorboardX import SummaryWriter
class Net(nn.Module):
def __init__(self, args, obs_size, n_action):
super(Net, self).__init__()
self.net = nn.Sequential(
nn.Linear(obs_size, args.Hidden_Size),
nn.ReLU(),
nn.Linear(args.Hidden_Size, n_action)
)
def forward(self, x):
return self.net(x)
def iterate_batches(env, net, args):
Episode = namedtuple("Episode", field_names=["reward", "steps"])
Single_Step = namedtuple("Single_Step", field_names=["observation", "action"])
batch = []
Episode_Steps = []
episode_reward = 0.0
obs = env.reset() # obtain the first observation
sm = nn.Softmax(dim=1)
while True:
obs_v = torch.FloatTensor(obs)
# get the probability of actions
act_probs_v = sm(net(obs_v).unsqueeze(dim=0))
# tensor.data convert the tensor into numpy array
act_probs = act_probs_v.data.numpy()[0]
# get the action by sampling the distribution
action = np.random.choice(len(act_probs), p=act_probs)
next_obs, reward, is_done, _ = env.step(action)
episode_reward += reward
step = Single_Step(observation=obs, action=action) # [s,a]
Episode_Steps.append(step) #
if is_done:
batch.append(Episode(reward=episode_reward, steps=Episode_Steps)) # saving the total reward and steps we have taken
episode_reward = 0
Episode_Steps = []
next_obs = env.reset()
if len(batch) == args.Batch_Size:
yield batch
batch = []
obs = next_obs
def filter_batch(batch, percentile):
rewards = list(map(lambda s: s.reward, batch))
reward_bound = np.percentile(rewards, percentile)
train_obs = []
train_act = []
for reward, steps in batch:
if reward < reward_bound:
continue
train_obs.extend(map(lambda step: step.observation, steps)) # add all obs of batch
train_act.extend(map(lambda step: step.action, steps)) # add all actions of batch
train_obs_v = torch.FloatTensor(train_obs)
train_act_v = torch.LongTensor(train_act)
return train_obs_v, train_act_v, reward_bound, float(np.mean(rewards))
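# Worked example (added for clarity): with episode rewards [10, 30, 50, 90] and
# percentile=70, np.percentile gives a bound of 54.0, so only the reward-90 episode
# survives the filter and its (observation, action) pairs form the next training batch.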
def main(args):
env = gym.make("CartPole-v0")
net = Net(args, obs_size=env.observation_space.shape[0], n_action=env.action_space.n)
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params=net.parameters(), lr=0.01)
# Use tensorboard --logdir runs
writer = SummaryWriter(comment="-cartpole")
for iter, batch in enumerate(iterate_batches(args=args, env=env, net=net)):
obs_v, act_v, reward_bound, mean_reward = filter_batch(batch=batch, percentile=args.Percentule)
optimizer.zero_grad()
act_pre = net(obs_v)  # shape=(batch, action_dim); act_v holds class indices of shape (batch,), as CrossEntropyLoss expects
loss = loss_func(act_pre, act_v)
loss.backward()
optimizer.step()
print("Iter: {} | reward_mean {} | reward_bound {}".format(iter, mean_reward, reward_bound))
writer.add_scalar("loss", loss.item(), iter)
writer.add_scalar("reward_bound", reward_bound, iter)
writer.add_scalar("reward_mean", mean_reward, iter)
if mean_reward > 199:
print("Solved!")
break
writer.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="The parameters of cross entropy RL")
parser.add_argument("--Hidden_Size", type=int ,help="The Hidden Size of Neural Network", default=128)
parser.add_argument("--Batch_Size", type=int, default=16)
parser.add_argument("--Percentule", type=int, help="The percentage of good trajectory", default=70)
args = parser.parse_args()
main(args)
```
#### File: awesome-reinforcement-learning/chap10 A2C/naiveA2C.py
```python
import gym
import argparse
import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical
import matplotlib.pyplot as plt
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
# This code is from openai baseline
# https://github.com/openai/baselines/tree/master/baselines/common/vec_env
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
def make_env(args):
def _thunk():
env = gym.make(args.env_name)
return env
return _thunk
class ActorCritic(nn.Module):
def __init__(self, num_inputs, num_outputs, hidden_size, std=0.0):
super(ActorCritic, self).__init__()
self.critic = nn.Sequential(
nn.Linear(num_inputs, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 1)
)
self.actor = nn.Sequential(
nn.Linear(num_inputs, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, num_outputs),
nn.Softmax(dim=1),
)
def forward(self, x):
value = self.critic(x)
probs = self.actor(x)
dist = Categorical(probs)
return dist, value
def test_env(args, vis=False):
env = gym.make(args.env_name) # a single env
state = env.reset()
if vis: env.render()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
next_state, reward, done, _ = env.step(dist.sample().cpu().numpy()[0])
state = next_state
if vis: env.render()
total_reward += reward
return total_reward
def compute_returns(next_value, rewards, masks, gamma=0.99):
R = next_value
returns = []
for step in reversed(range(len(rewards))):
R = rewards[step] + gamma * R * masks[step]
returns.insert(0, R)
return returns
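# Worked example (added for clarity): with rewards [1, 1, 1], masks [1, 1, 0],
# next_value 0 and gamma 0.5, the backward pass gives R2 = 1, R1 = 1 + 0.5*1 = 1.5 and
# R0 = 1 + 0.5*1.5 = 1.75, so compute_returns returns [1.75, 1.5, 1.0]; a mask of 0
# stops bootstrapping across an episode boundary.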
def plot(epoch, rewards):
plt.plot(rewards, 'b-')
plt.title('frame %s. reward: %s' % (epoch, rewards[-1]))
plt.pause(0.0001)
class Agent(object):
def __init__(self, env, exp_buffer, args):
super(Agent, self).__init__()
def build_model(self):
pass
def learn(self):
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="the parameter of a2c")
parser.add_argument('--hidden_size', type=int, help="maximum capacity of the buffer", default=256)
parser.add_argument('--lr', type=float, help='learning rate used in the Adam optimizer', default=1e-3)
parser.add_argument('--num_steps', type=int, help="the num of rollout", default=5)
parser.add_argument("--env_name", default="CartPole-v0") # OpenAI gym environment name
parser.add_argument("--num_envs", type=int, default=8) # OpenAI gym environment name
arg = parser.parse_args()
plt.ion()
envs = [make_env(arg) for i in range(arg.num_envs)]
envs = SubprocVecEnv(envs) # 8 env
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ActorCritic(envs.observation_space.shape[0], envs.action_space.n, arg.hidden_size).to(device)
optimizer = optim.Adam(model.parameters())
test_rewards = []
state = envs.reset()
for epoch in range(20000):
log_probs = []
values = []
rewards = []
masks = []
entropy = 0
for _ in range(arg.num_steps): # rollout trajectory
state = torch.FloatTensor(state).to(device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
state = next_state
if epoch % 100 == 0:
test_rewards.append(np.mean([test_env(args=arg) for _ in range(10)]))
plot(epoch, test_rewards)
next_state = torch.FloatTensor(next_state).to(device)
_, next_value = model(next_state)
returns = compute_returns(next_value, rewards, masks)
log_probs = torch.cat(log_probs)
returns = torch.cat(returns).detach()
values = torch.cat(values)
advantage = returns - values
actor_loss = -(log_probs * advantage.detach()).mean()
critic_loss = advantage.pow(2).mean()
loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy
optimizer.zero_grad()
loss.backward()
optimizer.step()
# test_env(True)
``` |
{
"source": "18279406017/code-of-csdn",
"score": 3
} |
#### File: code-of-csdn/OCR-Recognition/gen_captcha.py
```python
from captcha.image import ImageCaptcha #pip install captcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random
# characters used in the captcha
number = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
ALPHABET = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
'O', 'P', 'Q', 'R', 'S', 'T', 'U','V', 'W', 'X', 'Y', 'Z']
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u','v', 'w', 'x', 'y', 'z']
def random_captcha_lable(char_set = number+alphabet+ALPHABET,captcha_size=4):
captcha_lable = []
for i in range(captcha_size):
c = random.choice(char_set)
captcha_lable.append(c)
return captcha_lable
# generate the captcha image for a random label
def gen_captcha_lable_and_image():
image = ImageCaptcha()
captcha_lable = random_captcha_lable()
captcha_lable = ''.join(captcha_lable)
captcha = image.generate(captcha_lable)
captcha_image = Image.open(captcha)
captcha_image = np.array(captcha_image)
return captcha_lable,captcha_image
if __name__ == '__main__':
text,image = gen_captcha_lable_and_image()
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1,0.9,text,ha='center',va='center',transform=ax.transAxes)
plt.imshow(image)
plt.show()
``` |
{
"source": "18280108415/Interface",
"score": 3
} |
#### File: Interface/Common/sql.py
```python
import readConf
from readConf import *
import pymysql.cursors
#import readConf
print(readConf.host1,readConf.port,readConf.username,readConf.password,readConf.db)
print(type(int(readConf.port)))
'''Steps:
1. create a connection
2. create a cursor
3. execute the SQL
4. commit
5. close the cursor
6. close the connection
'''
'''class Mysql:
def __init__(self):
self= self'''
def mysql():
#print (cf.host)
#Create connection
conn = pymysql.connect(host=readConf.host1, port=int(readConf.port), user=readConf.username, passwd=readConf.password, db=readConf.db)
#create cursor
cursor = conn.cursor()
#execute sql
cursor.execute(readConf.sql)
#commit
conn.commit()
testData = cursor.fetchone()
print(testData)
#close cursor
cursor.close()
#close connection
conn.close()
if __name__ == '__main__':
mysql()
```
#### File: 18280108415/Interface/Request.py
```python
import requests
#get
'''headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36','Connection': 'keep-alive'}
r = requests.get('http://www.baidu.com/',headers=headers)
print(r.text)
print (r.encoding)
print(r.url)
print(r.content)
# decode the content as utf-8 to avoid garbled r.text
print(r.content.decode('utf-8'))
print('test')
#print(r.raise_for_status())
print(r.status_code)'''
#post
url1= ("https://fanyi.baidu.com/langdetect")
payload = {'query':'I miss you'}
headers={'Content-Type':'application/x-www-form-urlencoded','charset':'UTF-8'}
'''r = requests.post("https://fanyi.baidu.com/langdetect", data=payload,header=headers)
print(r.text)
print(r.raise_for_status())'''
def send_post(url=None,data=None,header=None):
r = requests.post(url, data=data, headers=header)
print(r.text)
print(r.raise_for_status())
if __name__ == '__main__':
send_post(url1,payload,headers)
'''response = requests.get("http://b-ssl.duitang.com/uploads/item/201707/20/20170720111208_EHX2K.jpeg")
print (response.content)
with open("love_img.jpeg","wb") as f:
f.write(response.content)
f.close()
#print(response.content)
#print(f.write(response.content).read_csv(f) )
print("Finish!")'''
```
#### File: Interface/run_cases/RunAll.py
```python
import unittest
import time
import os
from Common.HTMLTestRunner_jpg import HTMLTestRunner
def run_case(dir = "testCases"):
case_dir = os.path.dirname(os.getcwd()) + "\\" + dir
print(case_dir)
test_case = unittest.TestSuite()
discover = unittest.defaultTestLoader.discover(case_dir,pattern="test*.py",top_level_dir=None)
return discover
if __name__ == '__main__':
current_time = time.strftime("%Y-%m-%d-%H_%M_%S", time.localtime(time.time()))
report_path = os.path.dirname(os.getcwd()) + "\\report\\" + current_time + '.html'  # path of the generated test report
fp = open(report_path, "wb")
runner = HTMLTestRunner(stream=fp, title=u"TestReport", description=u'interface test report',verbosity=2)
runner.run(run_case())
fp.close()
```
#### File: Interface/testCases/test_01.py
```python
import unittest
import requests
from ddt import ddt,data,unpack
from Common.SendRequests import SendRequest
from Common.readExcel import ReadExcel
import os
import warnings
import re
# directory that holds the test data
path = os.path.dirname(os.getcwd())+"\\data\\C1.2testCase1.xls"
# testData is a list
testData = ReadExcel.readExcel(path, "C12testCase")
@ddt
class Test1(unittest.TestCase):
def setUp(self):
self.s = requests.session()
def tearDown(self):
pass
# pass each value of the list as a parameter
# every value in the list is a dict
@data(*testData)
def test_yoyo_api(self,data):
respon = SendRequest().send_request(self.s, data)
# ignore warnings
warnings.simplefilter("ignore", ResourceWarning)
# split the string and take the latter part
expect_json = data["expected_json"]
#print("expect_json")
#print(expect_json)
expected_status_code = data["expected_status_code"]
#print(expected_status_code)
expected_words = data["expected_words"]
url = data["url"]
# if the response type is json, assert success based on the json content and the status code
if data["response_type"] == 'json':
self.assertIn(expect_json, str(respon.json()), url+"接口请求错误")
self.assertEqual(expected_status_code,respon.status_code,url+"接口状态返回不正确")
# if the response type is html, assert success based on the html content and the status code
else:
# via a regular expression
htms = re.findall(r"有声双语美文:.*?_沪江英语学习网", respon.text) # 通过正则表达式 r"<a.*?>.*?</a>" 找到所有的数据并输出
for item in htms:
self.assertIn(expected_words, item, url+"接口请求错误")
#通过状态断言
self.assertEqual(expected_status_code, respon.status_code, url + "接口状态返回不正确")
if __name__ == '__main__':
unittest.main()
```
#### File: Interface/test/testSimple.py
```python
import requests
import json
import unittest
import re
import readExcel
import json
import os
path = os.path.dirname(os.getcwd())+"\\data\\C1.2testCase1.xls"
testData = readExcel.ReadExcel.readExcel(path, "C12testCase")
s = requests.session()
class UCTestCase(unittest.TestCase):
def test_send_request(self):
words= "'error': 0, 'msg': 'success'"
res = s.request(method='post', url='https://fanyi.baidu.com/langdetect',
headers={'Content-Type':'application/x-www-form-urlencoded','charset':'UTF-8'},params={'query':'I miss you'}, verify=False)
# the response is a dict; str() converts it to a string
re_json=str(res.json())
# use type() to inspect the data type
print(type(re_json))
#re_json.dump()
#print(re_json.dump())
print(res.status_code)
self.assertIn(words,re_json,'接口请求失败')
def qtest_01_post(self):
words = '学会感恩也就学会了快乐'
res = s.request(method='get', url='http://www.hjenglish.com/new/p1261643/',
headers={'content-type': 'text/html', 'charset': 'utf-8'}, verify=False)
#print(re.text)
# extract the text with a regular expression
htms = re.findall(r"有声双语美文:.*?_沪江英语学习网", res.text) # 通过正则表达式 r"<a.*?>.*?</a>" 找到所有的数据并输出
for item in htms:
print(item)
#self.assertEqual(200,res.status_code,"接口请求失败")
self.assertIn(words,item,'接口请求失败')
#self.assertIn(self,words,re.text)
# print(re.status_code)
def learn_test_02_get(self):
re = s.request(method='post', url='https://fanyi.baidu.com/langdetect',
headers={'Content-Type': 'application/x-www-form-urlencoded', 'charset': 'UTF-8'},
params={'query': 'I miss you'}, verify=False)
print(re.json())
print(re.status_code)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1828Loop/infinitethesis",
"score": 4
} |
#### File: 1828Loop/infinitethesis/tachyonic_antitelephone.py
```python
c=299792458
#velocity of the message as a product of the speed of light (c)
message_multiplier=4.4
u=c*message_multiplier
#velocity of inertial frame through space as a product of c
inertial_frame_multiplier=0.99
V=c*inertial_frame_multiplier
##TOLMANS THOUGHT EXPERIMENT
#time in seconds the message is recieved after the event of sending
def Ts(u,V,c):
time=(1-((u*V)/c**2))/((1-(V**2/c**2))**0.5)
return time
##
#activate thought experiment with variables
T2 = Ts(u,V,c)
#print results (if result is negative - it represents that the message is recieved before it was sent)
print(T2)
``` |
{
"source": "18-2-SKKU-OSS/2018-2-OSS-E5--",
"score": 3
} |
#### File: data_structures/graph/even_tree.py
```python
from __future__ import print_function
# pylint: disable=invalid-name
from collections import defaultdict
def dfs(start):
"""DFS traversal"""
# pylint: disable=redefined-outer-name
ret = 1
visited[start] = True
for v in tree.get(start):
if v not in visited:
ret += dfs(v)
if ret % 2 == 0:
cuts.append(start)
return ret
def even_tree():
"""
2 1
3 1
4 3
5 2
6 1
7 2
8 6
9 8
10 8
Removing edges (1,3) and (1,6) gives the desired result of 2.
"""
dfs(1)
if __name__ == '__main__':
n, m = 10, 9
tree = defaultdict(list)
visited = {}
cuts = []
count = 0
edges = [
(2, 1),
(3, 1),
(4, 3),
(5, 2),
(6, 1),
(7, 2),
(8, 6),
(9, 8),
(10, 8),
]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
```
#### File: data_structures/hashes_1/chaos_machine_for_hashing.py
```python
from __future__ import print_function
try:
    input = raw_input # Python 2
except NameError:
    pass # Python 3
# Chaos machine (K, t, m)
K = [0.33, 0.44, 0.55, 0.44, 0.33]; t = 3; m = 5
# Buffer space (including the parameter space)
buffer_space, params_space = [], []
# Machine time
machine_time = 0
def push(seed):
global buffer_space, params_space, machine_time, \
K, m, t
    # Select the dynamical systems (all of them)
    for key, value in enumerate(buffer_space):
        # Evolution parameter
        e = float(seed / value)
        # Control theory: orbit change
        value = (buffer_space[(key + 1) % m] + e) % 1
        # Control theory: trajectory change
        r = (params_space[key] + e) % 1 + 3
        # Modification (transition function) - jump
        buffer_space[key] = \
            round(float(r * value * (1 - value)), 10)
        params_space[key] = \
            r # store in the parameter space
    # Logistic-map bounds
    assert max(buffer_space) < 1
    assert max(params_space) < 4
    # Machine time
    machine_time += 1
def pull():
global buffer_space, params_space, machine_time, \
K, m, t
    # PRNG (George Marsaglia's Xorshift)
    def xorshift(X, Y):
        X ^= Y >> 13
        Y ^= X << 17
        X ^= Y >> 5
        return X
    # Select a dynamical system (incrementally)
    key = machine_time % m
    # Evolution (over the time length t)
    for i in range(0, t):
        # Variables (position + parameter)
        r = params_space[key]
        value = buffer_space[key]
        # Modification (transition function) - flow
        buffer_space[key] = \
            round(float(r * value * (1 - value)), 10)
        params_space[key] = \
            (machine_time * 0.01 + r * 1.01) % 1 + 3
    # Choosing chaotic data
    X = int(buffer_space[(key + 2) % m] * (10 ** 10))
    Y = int(buffer_space[(key - 2) % m] * (10 ** 10))
    # Machine time
    machine_time += 1
    return xorshift(X, Y) % 0xFFFFFFFF
def reset():
global buffer_space, params_space, machine_time, \
K, m, t
buffer_space = K; params_space = [0] * m
machine_time = 0
#######################################
# Initialization
reset()
# Push the input values
import random
message = random.sample(range(0xFFFFFFFF), 100)
for chunk in message:
    push(chunk)
# For loop control
inp = ""
# Print results until the user types "e" to exit
while inp not in ("e", "E"):
    print("%s" % format(pull(), '#04x'))
    print(buffer_space); print(params_space)
    inp = input("(e)exit? ").strip()
```
#### File: 2018-2-OSS-E5--/machine_learning/1. Linear_Regression.py
```python
from __future__ import print_function
import requests
import numpy as np
def collect_dataset():
""" CSGO 데이터 셋 수집
선수의 ADR vs Rating 정보를 가지고 있는 데이터 셋
;retrun : 행렬화 된 데이터 셋
"""
response = requests.get('https://raw.githubusercontent.com/yashLadha/' +
'The_Math_of_Intelligence/master/Week1/ADRvs' +
'Rating.csv')
lines = response.text.splitlines()
data = []
for item in lines:
item = item.split(',')
data.append(item)
    data.pop(0)  # remove the label (header) row from the list
dataset = np.matrix(data)
return dataset
def run_steep_gradient_descent(data_x, data_y,
len_data, alpha, theta):
""" Gradient Descent 방법을 이용해 theta를 업데이트하는 함수
:param data_x : 데이터 셋
:param data_y : 결과(output)값
:param len_data : feature의 개수
:param alpha : 학습률 (Learning rate)
:param theta : weigths
;return : 업데이트된 weights(theta)
"""
n = len_data
hypothesis = np.dot(theta, data_x.transpose())
gradient = np.dot(hypothesis-data_y.transpose(),data_x)/n
theta = theta - alpha * gradient
return theta
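# For reference, the update above is batch gradient descent on the mean-squared-error cost
# (notation added for clarity, not from the original file):
#   gradient = ((theta @ X^T - y^T) @ X) / n
#   theta    = theta - alpha * gradient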
def sum_of_square_error(data_x, data_y, len_data, theta):
""" 에러값 (sum of square error) 값을 반환하는 함수
:param data_x : 데이터셋
:param data_y : 결과값
:param len_data : feature의 개수
:param theta : weights
;return : 에러값
"""
hypothesis = np.dot(theta, data_x.transpose())
error = np.mean(np.square(hypothesis - data_y.transpose())) / 2
return error
def run_linear_regression(data_x, data_y):
""" 선형회귀를 시행하는 함수
:param data_x : 데이터 셋
:param data_y : 결과값
;return : 가장 예측을 잘하는 weights
"""
iterations = 100000
alpha = 0.0001550
no_features = data_x.shape[1]
len_data = data_x.shape[0] - 1
theta = np.zeros((1, no_features))
for i in range(0, iterations):
theta = run_steep_gradient_descent(data_x, data_y,
len_data, alpha, theta)
error = sum_of_square_error(data_x, data_y, len_data, theta)
print('At Iteration %d - Error is %.5f ' % (i + 1, error))
return theta
def main():
""" 메인 함수 """
data = collect_dataset()
len_data = data.shape[0]
data_x = np.c_[np.ones(len_data), data[:, :-1]].astype(float)
data_y = data[:, -1].astype(float)
theta = run_linear_regression(data_x, data_y)
len_result = theta.shape[1]
print('Resultant Feature vector : ')
for i in range(0, len_result):
print('%.5f' % (theta[0, i]))
if __name__ == '__main__':
main()
```
#### File: machine_learning/neural_network/convolution_neural_network.py
```python
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class CNN():
def __init__(self,conv1_get,size_p1,bp_num1,bp_num2,bp_num3,rate_w=0.2,rate_t=0.2):
        '''
        :param conv1_get : [a, c, d], size, number, and step of the convolution kernels
        :param size_p1   : pooling size
        :param bp_num1   : number of units in the flattened layer
        :param bp_num2   : number of units in the hidden layer
        :param bp_num3   : number of units in the output layer
        :param rate_w    : learning rate of the weights
        :param rate_t    : learning rate of the thresholds
        '''
self.num_bp1 = bp_num1
self.num_bp2 = bp_num2
self.num_bp3 = bp_num3
self.conv1 = conv1_get[:2]
self.step_conv1 = conv1_get[2]
self.size_pooling1 = size_p1
self.rate_weight = rate_w
self.rate_thre = rate_t
self.w_conv1 = [np.mat(-1*np.random.rand(self.conv1[0],self.conv1[0])+0.5) for i in range(self.conv1[1])]
self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
self.vji = np.mat(-1*np.random.rand(self.num_bp2, self.num_bp1)+0.5)
self.thre_conv1 = -2*np.random.rand(self.conv1[1])+1
self.thre_bp2 = -2*np.random.rand(self.num_bp2)+1
self.thre_bp3 = -2*np.random.rand(self.num_bp3)+1
def save_model(self,save_path):
        # save the model dictionary with pickle
import pickle
model_dic = {'num_bp1':self.num_bp1,
'num_bp2':self.num_bp2,
'num_bp3':self.num_bp3,
'conv1':self.conv1,
'step_conv1':self.step_conv1,
'size_pooling1':self.size_pooling1,
'rate_weight':self.rate_weight,
'rate_thre':self.rate_thre,
'w_conv1':self.w_conv1,
'wkj':self.wkj,
'vji':self.vji,
'thre_conv1':self.thre_conv1,
'thre_bp2':self.thre_bp2,
'thre_bp3':self.thre_bp3}
with open(save_path, 'wb') as f:
pickle.dump(model_dic, f)
print('Model saved: %s'% save_path)
@classmethod
def ReadModel(cls,model_path):
        #read a saved model
import pickle
with open(model_path, 'rb') as f:
model_dic = pickle.load(f)
conv_get= model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
size_p1 = model_dic.get('size_pooling1')
bp1 = model_dic.get('num_bp1')
bp2 = model_dic.get('num_bp2')
bp3 = model_dic.get('num_bp3')
r_w = model_dic.get('rate_weight')
r_t = model_dic.get('rate_thre')
        # create a model instance
conv_ins = CNN(conv_get,size_p1,bp1,bp2,bp3,r_w,r_t)
        # restore the model parameters
conv_ins.w_conv1 = model_dic.get('w_conv1')
conv_ins.wkj = model_dic.get('wkj')
conv_ins.vji = model_dic.get('vji')
conv_ins.thre_conv1 = model_dic.get('thre_conv1')
conv_ins.thre_bp2 = model_dic.get('thre_bp2')
conv_ins.thre_bp3 = model_dic.get('thre_bp3')
return conv_ins
def sig(self,x):
return 1 / (1 + np.exp(-1*x))
def do_round(self,x):
return round(x, 3)
def convolute(self,data,convs,w_convs,thre_convs,conv_step):
        #convolution process
size_conv = convs[0]
num_conv =convs[1]
size_data = np.shape(data)[0]
        #take slices of the original image data: data_focus
data_focus = []
for i_focus in range(0, size_data - size_conv + 1, conv_step):
for j_focus in range(0, size_data - size_conv + 1, conv_step):
focus = data[i_focus:i_focus + size_conv, j_focus:j_focus + size_conv]
data_focus.append(focus)
        #compute the feature map of every kernel and store them as a list of matrices
data_featuremap = []
Size_FeatureMap = int((size_data - size_conv) / conv_step + 1)
for i_map in range(num_conv):
featuremap = []
for i_focus in range(len(data_focus)):
net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
featuremap.append(self.sig(net_focus))
featuremap = np.asmatrix(featuremap).reshape(Size_FeatureMap, Size_FeatureMap)
data_featuremap.append(featuremap)
        #flatten the data slices into one dimension
focus1_list = []
for each_focus in data_focus:
focus1_list.extend(self.Expand_Mat(each_focus))
focus_list = np.asarray(focus1_list)
return focus_list,data_featuremap
def pooling(self,featuremaps,size_pooling,type='average_pool'):
        #pooling process
size_map = len(featuremaps[0])
size_pooled = int(size_map/size_pooling)
featuremap_pooled = []
for i_map in range(len(featuremaps)):
map = featuremaps[i_map]
map_pooled = []
for i_focus in range(0,size_map,size_pooling):
for j_focus in range(0, size_map, size_pooling):
focus = map[i_focus:i_focus + size_pooling, j_focus:j_focus + size_pooling]
if type == 'average_pool':
                        #average pooling
map_pooled.append(np.average(focus))
elif type == 'max_pooling':
                        #max pooling
map_pooled.append(np.max(focus))
map_pooled = np.asmatrix(map_pooled).reshape(size_pooled,size_pooled)
featuremap_pooled.append(map_pooled)
return featuremap_pooled
def _expand(self,datas):
        #expand three-dimensional data into a one-dimensional list
data_expanded = []
for i in range(len(datas)):
shapes = np.shape(datas[i])
data_listed = datas[i].reshape(1,shapes[0]*shapes[1])
data_listed = data_listed.getA().tolist()[0]
data_expanded.extend(data_listed)
data_expanded = np.asarray(data_expanded)
return data_expanded
def _expand_mat(self,data_mat):
        #expand a matrix into a one-dimensional list
data_mat = np.asarray(data_mat)
shapes = np.shape(data_mat)
data_expanded = data_mat.reshape(1,shapes[0]*shapes[1])
return data_expanded
def _calculate_gradient_from_pool(self,out_map,pd_pool,num_map,size_map,size_pooling):
        '''
        Calculate the gradient from the data slices of the pooling layer
        pd_pool: list of matrices
        out_map: the shape of the data slice (size_map * size_map)
        return: pd_all, a list of matrices, [num, size_map, size_map]
        '''
pd_all = []
i_pool = 0
for i_map in range(num_map):
pd_conv1 = np.ones((size_map, size_map))
for i in range(0, size_map, size_pooling):
for j in range(0, size_map, size_pooling):
pd_conv1[i:i + size_pooling, j:j + size_pooling] = pd_pool[i_pool]
i_pool = i_pool + 1
pd_conv2 = np.multiply(pd_conv1,np.multiply(out_map[i_map],(1-out_map[i_map])))
pd_all.append(pd_conv2)
return pd_all
def trian(self,patterns,datas_train, datas_teach, n_repeat, error_accuracy,draw_e = bool):
        # train the model
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ',np.shape(datas_train)))
print((' - - Shape: Teach_Data ',np.shape(datas_teach)))
rp = 0
all_mse = []
mse = 10000
while rp < n_repeat and mse >= error_accuracy:
alle = 0
print('-------------Learning Time %d--------------'%rp)
for p in range(len(datas_train)):
#print('------------Learning Image: %d--------------'%p)
data_train = np.asmatrix(datas_train[p])
data_teach = np.asarray(datas_teach[p])
data_focus1,data_conved1 = self.convolute(data_train,self.conv1,self.w_conv1,
self.thre_conv1,conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1,self.size_pooling1)
shape_featuremap1 = np.shape(data_conved1)
'''
print(' -----original shape ', np.shape(data_train))
print(' ---- after convolution ',np.shape(data_conv1))
print(' -----after pooling ',np.shape(data_pooled1))
'''
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = np.dot(bp_out1,self.vji.T) - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = np.dot(bp_out2 ,self.wkj.T) - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
                #--------------model learning ------------------------
                #--------------compute errors and gradients---------------
pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
pd_j_all = np.multiply(np.dot(pd_k_all,self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
pd_i_all = np.dot(pd_j_all,self.vji)
pd_conv1_pooled = pd_i_all / (self.size_pooling1*self.size_pooling1)
pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
pd_conv1_all = self._calculate_gradient_from_pool(data_conved1,pd_conv1_pooled,shape_featuremap1[0],
shape_featuremap1[1],self.size_pooling1)
                #weight and threshold learning process---------
                #convolution layer------------------------
for k_conv in range(self.conv1[1]):
pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
delta_w = self.rate_weight * np.dot(pd_conv_list,data_focus1)
self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape((self.conv1[0],self.conv1[0]))
self.thre_conv1[k_conv] = self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                # fully connected layers
self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the summed error of every single image
errors = np.sum(abs((data_teach - bp_out3)))
alle = alle + errors
#print(' ----Teach ',data_teach)
#print(' ----BP_output ',bp_out3)
rp = rp + 1
mse = alle/patterns
all_mse.append(mse)
def draw_error():
yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(all_mse, '+-')
plt.plot(yplot, 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(True, alpha=0.5)
plt.show()
        print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, ' - - Mse: %.6f' % mse))
if draw_e:
draw_error()
return mse
def predict(self,datas_test):
#model predict
produce_out = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ',np.shape(datas_test)))
for p in range(len(datas_test)):
data_test = np.asmatrix(datas_test[p])
data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1,
self.thre_conv1, conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
data_bp_input = self._expand(data_pooled1)
bp_out1 = data_bp_input
bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
bp_out2 = self.sig(bp_net_j)
bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
bp_out3 = self.sig(bp_net_k)
produce_out.extend(bp_out3.getA().tolist())
res = [list(map(self.do_round,each)) for each in produce_out]
return np.asarray(res)
def convolution(self,data):
        # return the image data after convolution and pooling so it can be inspected
data_test = np.asmatrix(data)
data_focus1, data_conved1 = self.convolute(data_test, self.conv1, self.w_conv1,
self.thre_conv1, conv_step=self.step_conv1)
data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
return data_conved1,data_pooled1
if __name__ == '__main__':
pass
'''
put the example on other file
'''
```
#### File: 2018-2-OSS-E5--/Maths/absMax.py
```python
from abs import absVal
def absMax(x):
"""
    >>> absMax([0,5,1,11])
    11
    >>> absMax([3,-10,-2])
    -10
"""
    j = x[0]  # compare from the first element to the last one
    for i in x:
        if absVal(i) > absVal(j):  # if the absolute value of x[i] is larger than the current maximum's, keep it
            j = i
    return j
    # note: absVal expects a single number; passing a list raises a TypeError
def main():
a = [1,2,-11]
    print(absMax(a))  # find the element with the max absolute value
if __name__ == '__main__':
main()
```
#### File: 2018-2-OSS-E5--/Maths/check_armstrong.py
```python
def main():
n = int(input("Enter the number: ")) #input size of iteration
for i in range(1, n + 1): #iteration
b = checkarmstrong(i)
        if b:  # if it is an Armstrong number, print it
print(str(i) + " is an armstrong number")
def checkarmstrong(n):
t = n
sum_num = 0
while t != 0:
r = t % 10
sum_num = sum_num + (r * r * r) #armstrong number rule
t = t//10
if sum_num == n: #check whether the result and original input number are equal
return True
else:
return False
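# Worked example (added for clarity): 153 is an Armstrong number because
# 1**3 + 5**3 + 3**3 = 1 + 125 + 27 = 153, so checkarmstrong(153) returns True,
# while checkarmstrong(154) returns False (1 + 125 + 64 = 190 != 154).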
if __name__ == '__main__':
main()
```
#### File: 2018-2-OSS-E5--/other/two_sum.py
```python
from __future__ import print_function
def twoSum(nums, target):
"""
인자형: List[int]
목표값의 형: int
반환형: List[int]
"""
chk_map = {}
for index, val in enumerate(nums):
compl = target - val
if compl in chk_map:
indices = [chk_map[compl], index]
print(indices)
return [indices]
else:
chk_map[val] = index
return False
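# Illustrative usage (example input assumed, not part of the original file):
if __name__ == '__main__':
    print(twoSum([2, 7, 11, 15], 9))  # prints [0, 1] inside the function, then [[0, 1]]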
```
#### File: 2018-2-OSS-E5--/searches/1-1. sentinel_linear_search.py
```python
from __future__ import print_function
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
#sentinel linear search
def sentinel_linear_search(sequence, target):
"""
:param sequence : 탐색을 진행할 배열
:param target : 탐색할 키(key) 값
;return : 키 값이 있는 위치(index), 없을 경우 None
"""
sequence.append(target)
index = 0
while sequence[index] != target:
index += 1
sequence.pop()
if index == len(sequence):
return None
return index
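# Note (added for clarity): appending the target as a sentinel guarantees the while-loop
# terminates without a bounds check on every iteration; after popping the sentinel,
# index == len(sequence) means only the sentinel matched, i.e. the target was absent.
# Example (illustrative): sentinel_linear_search([4, 8, 15], 8) returns 1.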
if __name__ == '__main__':
user_input = raw_input('Enter numbers separated by comma:\n').strip()
sequence = [int(item) for item in user_input.split(',')]
target_input = raw_input('Enter a single number to be found in the list:\n')
target = int(target_input)
result = sentinel_linear_search(sequence, target)
if result is not None:
print('{} found at positions: {}'.format(target, result))
else:
print('Not found')
```
#### File: 2018-2-OSS-E5--/searches/5. quick_select.py
```python
import random
#partition
def _partition(data, pivot):
"""
pivot 값보다 상대적으로 작거나 같거나 큰
3가지 배열로 분할을 한다.
:param data : 분할을 진행할 배열
:param pivot : 피벗(pivot) 값
;return : 피벗 보다 작거나, 같거나 큰 3개의 배열
"""
less, equal, greater = [], [], []
for element in data:
        if element < pivot:  # compare the values directly (the original `.address` attribute does not exist on plain numbers)
            less.append(element)
        elif element > pivot:
greater.append(element)
else:
equal.append(element)
return less, equal, greater
#quickselect search
def quickSelect(list, k):
"""
:param list : 탐색을 진행할 배열
:param k : k 번째 작은 숫자 탐색
;return : 키 값이 있는 위치(index), 없을 경우 None
"""
    #k = len(list)  # for an already sorted array, k could be used to pick the median as the pivot
smaller = []
larger = []
pivot = random.randint(0, len(list) - 1)
pivot = list[pivot]
count = 0
smaller, equal, larger =_partition(list, pivot)
count = len(equal)
m = len(smaller)
#k is the pivot
if m <= k < m + count:
return pivot
# must be in smaller
elif m > k:
return quickSelect(smaller, k)
#must be in larger
else:
return quickSelect(larger, k - (m + count))
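# Illustrative usage (example values assumed, not part of the original file):
if __name__ == '__main__':
    print(quickSelect([9, 1, 8, 2, 7, 3], 0))  # 1, the smallest element
    print(quickSelect([9, 1, 8, 2, 7, 3], 3))  # 7, the 4th smallest since k is 0-indexed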
```
#### File: 2018-2-OSS-E5--/sorts/4.Tree_sort.py
```python
from __future__ import print_function
class node(): #class implementing a Binary Search Tree
    def __init__(self, val): #store the first value in the node
self.val = val
self.left = None
self.right = None
    def insert(self,val): #insert a value into the tree
if self.val:
            if val < self.val: #smaller than this node's value: go into the left subtree
if self.left is None:
self.left = node(val)
else:
self.left.insert(val)
            elif val > self.val: #larger than this node's value: go into the right subtree
if self.right is None:
self.right = node(val)
else:
self.right.insert(val)
else:
self.val = val
"""
Binary Search Tree를 오름차순으로 출력하기위해선
inorder 순으로 배열에 저장하여 출력을 해야하기 위해 inorder 함수를 추가하였습니다.
"""
def inorder(root, res):
if root:
inorder(root.left,res)
res.append(root.val)
inorder(root.right,res)
def treesort(arr):
    # build the Binary Search Tree
if len(arr) == 0:
return arr
root = node(arr[0])
for i in range(1,len(arr)):
root.insert(arr[i])
    # use the inorder traversal to obtain the values in ascending order
res = []
inorder(root,res)
return res
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
for i in range(3):
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(treesort(unsorted))
``` |
{
"source": "18325391772/blog-code-example",
"score": 3
} |
#### File: 18-10to12/py-base64encode/basetest.py
```python
import mybase64
import base64, random, string, timeit, sys
def test():
str = b'jz4Vxns'
expect = base64.b64encode( str )
actual = mybase64.b64encode( str )
print( 'expect: ', expect )
print( 'actual: ', actual )
print( expect == actual )
_CHARS = string.ascii_letters + string.digits
def randomString():
# print( chars )
size = random.randint( 70, 100 )
rstr = ''.join( random.SystemRandom().choices( _CHARS, k = size ) )
return rstr.encode()
def encode1():
rstr = randomString()
base64.b64encode( rstr )
def encode2():
rstr = randomString()
mybase64.b64encode( rstr )
def compare():
rstr = randomString()
exp = base64.b64encode( rstr )
act = mybase64.b64encode( rstr )
if( exp != act ):
print( rstr )
print( exp )
print( act )
raise ValueError
loops = 10000
# print( 'encode comp: ', timeit.timeit( stmt = compare, number = loops ) )
print( sys.version )
print( 'random: ', timeit.timeit( randomString, number = loops ) )
print( 'encode1: ', timeit.timeit( stmt = encode1, number = loops ) )
print( 'encode2: ', timeit.timeit( stmt = encode2, number = loops ) )
```
#### File: 18-10to12/python_image_processing/video_matplot.py
```python
import matplotlib.pyplot as plt
from matplotlib import style
from functools import partial
import numpy as np
from datetime import datetime
FILE_NAME = "I:/video.dat"
WIDTH = 2096
HEIGHT = 150
CHANNELS = 4
PACK_SIZE = WIDTH * HEIGHT * CHANNELS
import cv2 as cv
def gen_image_from_file( file ):
with open( file, 'rb' ) as f:
records = iter( partial( f.read, PACK_SIZE ), b'' )
frame = next( records )
frame = next( records )
img = np.zeros( ( HEIGHT, WIDTH, CHANNELS ), dtype = np.uint8)
e1 = cv.getTickCount()
for y in range(0, HEIGHT):
for x in range( 0, WIDTH ):
pos = (y * WIDTH + x) * CHANNELS
for i in range( 0, CHANNELS - 1 ):
img[y][x][i] = frame[ pos + i ]
img[y][x][3] = 255
e2 = cv.getTickCount()
elapsed = ( e2 - e1 ) / cv.getTickFrequency()
print("Time Used: ", elapsed )
plt.imshow( img )
plt.tight_layout()
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.xticks([])
plt.yticks([])
plt.show()
import matplotlib.animation as animation
def image_animation():
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
try:
img = np.zeros( ( HEIGHT, WIDTH, CHANNELS ), dtype = np.uint8)
f = open( FILE_NAME, 'rb' )
records = iter( partial( f.read, PACK_SIZE ), b'' )
def animateFromData(i):
e1 = cv.getTickCount()
frame = next( records ) # drop a line data
for y in range( 0, HEIGHT ):
for x in range( 0, WIDTH ):
pos = (y * WIDTH + x) * CHANNELS
for i in range( 0, CHANNELS - 1 ):
img[y][x][i] = frame[ pos + i]
img[y][x][3] = 255
ax1.clear()
ax1.imshow( img )
e2 = cv.getTickCount()
elapsed = ( e2 - e1 ) / cv.getTickFrequency()
print( "FPS: %.2f, Used time: %.3f" % (1 / elapsed, elapsed ))
def optAnimateFromData(i):
e1 = cv.getTickCount()
frame = next( records ) # one image data
img = np.reshape( np.array( list( frame ), dtype = np.uint8 ), ( HEIGHT, WIDTH, CHANNELS ) )
img[ : , : , 3] = 255
ax1.clear()
ax1.imshow( img )
e2 = cv.getTickCount()
elapsed = ( e2 - e1 ) / cv.getTickFrequency()
print( "FPS: %.2f, Used time: %.3f" % (1 / elapsed, elapsed ))
# a = animation.FuncAnimation( fig, animateFromData, interval=30 )
a = animation.FuncAnimation( fig, optAnimateFromData, interval=30 )
plt.tight_layout()
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.xticks([])
plt.yticks([])
plt.show()
except StopIteration:
pass
finally:
f.close()
if __name__ == '__main__':
image_animation()
# gen_image_from_file( FILE_NAME )
```
#### File: plugin_python/plugintest/helper.py
```python
from pathlib import Path
_BASE_DIR = Path.home() / ".shells"
_LOG_DIR = _BASE_DIR / "logs"
_CONFIG_DIR = _BASE_DIR / "configs"
_LOG_DIR.mkdir(parents=True, exist_ok=True)
_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
def createLogger(name: str, stream = False):
"""create logger, specify name, for example: test.log
suffix is not necessary but helpful
"""
import logging
log_file = _LOG_DIR / name
logger = logging.getLogger()
fh = logging.FileHandler(log_file)
fh.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
fh.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
if stream:
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(logging.Formatter("%(asctime)s %(levelname)s: %(message)s"))
logger.addHandler(sh)
return logger
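# Illustrative usage (file name assumed): writes to ~/.shells/logs/app.log and,
# with stream=True, also echoes every record to the console.
#   logger = createLogger("app.log", stream=True)
#   logger.info("service started")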
```
#### File: plugin_python/plugintest/plugin.py
```python
from .helper import createLogger
logger = createLogger('pluginLoader', stream=False)  # helper.createLogger only accepts (name, stream)
class Events(dict):
def __getitem__(self,name):
if name not in self:
super().__setitem__(name, [])
return super().__getitem__(name)
def trigger(self, eventName: str, eventArgs = None, **kwargs):
"""trigger event `eventName`
"""
try:
for e in self[eventName]:
e(eventArgs, **kwargs)
except Exception as exc:
logger.warn("Exception at 'beforeRegisterRoutes: {}".format(exc))
def on(self, eventName: str, callback ):
"""append callback function for certain event
"""
self[eventName].append(callback)
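# Minimal usage sketch of Events (illustrative, not part of the original module):
#   events = Events()
#   events.on("task_schedule", lambda sched: sched.every(10).seconds.do(print, "tick"))
#   events.trigger("task_schedule", schedule)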
import schedule
class BackTask(object):
def __init__(self, events):
self.events = events
def start(self):
"""this method makes sure that endless while loop running in a subthread and will not interfere webgui
"""
from threading import Thread
mt = Thread( target= self.__loop_monitor )
mt.daemon = True
mt.start()
def __loop_monitor(self):
import time
self.events.trigger("task_schedule", schedule)
logger.info( 'start background tasks....' )
count = 0
while True:
# schedule.run_pending()
try:
schedule.run_pending()
except Exception as e:
logger.warn("Schedule running: {}".format(e))
count += 1
if count == 3600:
logger.debug("One Hour Pass.")
count = 0
time.sleep( 1 )
def load_plugins(app):
from pathlib import Path
import os
pluginPath = Path(os.path.dirname(__file__)) / "plugins"
import sys
sys.path.insert(0, str(pluginPath))
    # filter out files that are not plugins
plugins = []
for file in pluginPath.iterdir():
if file.name.startswith('__'):
continue
if file.is_dir():
dir_content = [x.name for x in file.iterdir()]
if "__init__.py" in dir_content:
plugins.append(file)
dir_content.clear()
elif file.is_file() and file.suffix == ".py":
plugins.append(file)
import importlib
    # filter out plugins whose dependencies cannot be satisfied
logger.info("开始过滤插件。")
plugin_module = []
no_dependences = []
plugin_file_names = [plugin_file.stem for plugin_file in plugins]
for plugin_file in plugins:
try:
name = plugin_file.stem
# print(' ==>', name)
if plugin_file.is_dir():
# following works for python3.6
pm = importlib.import_module("{}".format(name))
# following works for python3.7
# pm = importlib.import_module("{}/__init__".format(name))
else:
pm = importlib.import_module(name)
pm.name = name
if(not hasattr(pm, 'requirement') or len(pm.requirement) == 0):
logger.debug("导入无依赖插件 {}".format(pm))
no_dependences.append(pm)
else:
meet = True
for r in pm.requirement:
if r not in plugin_file_names:
meet = False
if meet:
plugin_module.append(pm)
logger.debug("导入有依赖插件 {}".format(pm))
except Exception as exc:
logger.warn("Load plugin `{}` Error: {}".format(plugin_file, exc))
plugins.clear()
importlib.invalidate_caches()
logger.info("过滤插件完毕,无依赖插件数:{},有依赖插件数: {}"\
.format(len(no_dependences), len(plugin_module)))
num_all_plugins = len(no_dependences) + len(plugin_module)
logger.info("准备初始化插件")
app.m_plugins = []
num_loaded = 0
level_loaded = 0
while(num_loaded < num_all_plugins and level_loaded < 3):
num_loaded += __do_load_plugin(app, plugin_module, no_dependences)
level_loaded += 1
logger.info("插件初始化完成, 加载了{}个插件".format(num_loaded))
def __do_load_plugin(app, plugin_module, no_dependences):
    # load the plugins without dependencies first
# logger.debug("no_dependces: {}".format(no_dependences))
num_loaded = 0
for nd in no_dependences:
try:
logger.debug("初始化插件: {}".format(nd))
nd.init(app)
# logger.debug("load: {}".format(nd))
app.m_plugins.append(nd)
num_loaded += 1
except Exception as exc:
logger.warn("Init plugin `{}` Error: {}".format(nd, exc))
# logger.debug("""loaded: {}, \nno_depend: {},
# plugin_module: {}""".format(app.m_plugins, no_dependences, plugin_module))
no_dependences.clear()
    # strip requirements that are already satisfied by the loaded plugins
for depend in plugin_module:
for pm in app.m_plugins:
# for req in pm.requirement:
if pm.name in depend.requirement:
depend.requirement.remove(pm.name)
no_dependences.clear()
tmp_plugins = []
for pm in plugin_module:
if len(pm.requirement) > 0:
tmp_plugins.append(pm)
else:
no_dependences.append(pm)
plugin_module.clear()
for pm in tmp_plugins:
plugin_module.append(pm)
# logger.debug("no_depend: {}".format(no_dependences))
return num_loaded
```
#### File: 19-q2/image-dft/process.py
```python
import pycuda.autoinit
import pycuda.driver as drv
import numpy as np
cuda = drv
numpy = np
from pycuda.compiler import SourceModule
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# mpl.rcParams['figure.figsize'] = [3, 2]
# mpl.rcParams['figure.dpi'] = 1
import math
PI2 = math.pi * 2
from tqdm import tqdm, tnrange
def plti(im, **kwargs):
y = im.shape[0]
x = im.shape[1]
w = y / x * h
# fig=plt.figure(dpi=dpi)
# ax.plot(im, **kwargs)
# plt.figure(figsize=(w, h))
plt.imshow(im, interpolation='none', **kwargs)
plt.axis('off')
plt.show()
def to_greyscale(im, weights = np.c_[0.2989, 0.5870, 0.1140]):
tile = np.tile(weights, reps=(im.shape[0], im.shape[1], 1))
return np.sum(tile * im, axis = 2)
image = plt.imread("IMG_MIN.jpg")
h, w, d = image.shape
plti(image)
print(image.shape)
grey_img = to_greyscale(image)
plti(grey_img)
# plt.imsave("grey.png", grey_img)
## GPU
mod = SourceModule("""
#include <cuComplex.h>
#include <math_constants.h>
struct DoubleOperation {
int M, N; // so 64-bit ptrs can be aligned
int u, v;
float *ptr;
float *Fr;
float *Fi;
};
__global__ void double_array(DoubleOperation *a) {
a = &a[blockIdx.x + blockIdx.y * a->M];
// test = &test[blockIdx.x];
for (int y = threadIdx.y; y < a->N; y += blockDim.y) {
for (int x = threadIdx.x; x < a->M; x += blockDim.x) {
int idx = x + y * a->M;
float t = - CUDART_PI_F * 2 * (float(a->u * x) / a->M + float(a->v * y) / a->N);
a->Fr[idx] = a->ptr[idx] * cosf(t);
a->Fi[idx] = a->ptr[idx] * sinf(t);
}
}
}
""")
class DFTOpStruct:
mem_size = 16 + np.intp(0).nbytes * 3
_data = None
def __init__(self, array, struct_arr_ptr, u = 0, v = 0):
# self.data = cuda.to_device(array)
test = np.zeros_like(array, dtype=np.float32)
self.Fr = cuda.to_device(test)
self.shape, self.dtype = array.shape, array.dtype
test2 = np.zeros_like(array, dtype=np.float32)
self.Fi = cuda.to_device(test2)
self.shape, self.dtype = array.shape, array.dtype
cuda.memcpy_htod(int(struct_arr_ptr), memoryview(np.int32(array.shape[1]))) # Operation int
cuda.memcpy_htod(int(struct_arr_ptr) + 4, memoryview(np.int32(array.shape[0]))) # Operation int
cuda.memcpy_htod(int(struct_arr_ptr) + 8, memoryview(np.int32(u))) # Operation int
cuda.memcpy_htod(int(struct_arr_ptr) + 12, memoryview(np.int32(v))) # Operation int
cuda.memcpy_htod(int(struct_arr_ptr) + 16, memoryview(np.intp(int(self._data)))) # DoubleOperation ptr
cuda.memcpy_htod(int(struct_arr_ptr) + 16+ np.intp(0).nbytes , memoryview(np.intp(int(self.Fr))))
cuda.memcpy_htod(int(struct_arr_ptr) + 16+ np.intp(0).nbytes * 2, memoryview(np.intp(int(self.Fi))))
# print(np.int32(array.shape[1]), np.int32(array.shape[0]))
def __str__(self):
return str(cuda.from_device(self._data, self.shape, self.dtype))
def getFr(self):
return np.array(cuda.from_device(self.Fr, self.shape, self.dtype))
def getFi(self):
return np.array(cuda.from_device(self.Fi, self.shape, self.dtype))
def print_test(self):
print(str(cuda.from_device(self.Fr, self.shape, self.dtype)))
def print_test2(self):
print(str(cuda.from_device(self.Fi, self.shape, self.dtype)))
func = mod.get_function("double_array")
def subCalcDFT(img, us = 0, col = 16, vs = 0, row = 16):
"""
us: start of u
vs: start of v
    col: range of u
    row: range of v
    """
img = img.astype(np.float32)
batch_size = row * col
    struct_arr = cuda.mem_alloc(batch_size * DFTOpStruct.mem_size) # allocate device memory
ptr = [int(struct_arr) + DFTOpStruct.mem_size * i for i in range(batch_size)]
arr = []
DFTOpStruct._data = cuda.to_device(img)
# for u in tnrange(us, us + col, desc="Memory Alloacte"):
for u in range(us, us + col):
for v in range(vs, vs + row):
idx = u - us + (v - vs) * col
o = DFTOpStruct(img, ptr[idx], u = u, v = v)
arr.append(o)
func(struct_arr, block = (row, col, 1), grid=(1, 1))
F = np.zeros((row, col), dtype=np.complex)
# for u in tnrange(row, desc = "calc DFT"):
for u in range(row):
for v in range(col):
i = u + v * row
Fr = np.sum(arr[i].getFr())
Fi = np.sum(arr[i].getFi())
F[v][u] = Fr + 1j * Fi
return F
def calcDFT(img):
    img = img.astype(np.float32)
M = img.shape[1]
N = img.shape[0]
    struct_arr = cuda.mem_alloc(M * N * DFTOpStruct.mem_size) # allocate device memory
ptr = [int(struct_arr) + DFTOpStruct.mem_size * i for i in range(M * N)]
# img = np.arange(M * N, dtype=np.float32).reshape(M, N)
arr = []
DFTOpStruct._data = cuda.to_device(img)
for u in tnrange(M, desc="Memory Alloacte"):
for v in range(N):
idx = u + v * M
o = DFTOpStruct(img, ptr[idx], u = u, v = v)
arr.append(o)
func(struct_arr, block = (32, 32, 1), grid=(2, 1))
F = np.zeros((M, N), dtype=np.complex)
for u in tnrange(M, desc = "calc DFT"):
for v in range(N):
i = u + v * M
Fr = np.sum(arr[i].getFr())
Fi = np.sum(arr[i].getFi())
F[v][u] = Fr + 1j * Fi
return F
transed_img = np.zeros_like(grey_img, dtype=np.complex)
r = 32
c = 32
width = 288
height = 288
for y in tnrange(int(width / c), desc = "Y"):
for x in tnrange(int(height / r), desc="X"):
t_img = subCalcDFT(grey_img, us=x * c, col = c, vs = y * r, row = r)
transed_img[y * r:(y+1) * r, x * c:(x+1)* c] = t_img
## CPU
def calc(f, u, v):
value = 0
Ny = f.shape[0]
Nx = f.shape[1]
for ny in range(Ny):
for nx in range(Nx):
value += f[ny][nx] * np.exp(-1j * PI2* (nx/ Nx * u + ny/ Ny * v ))
# * np.exp(-PI2 * ky * ny / Ny)
return value
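# The loop above evaluates one coefficient of the 2-D DFT term by term
# (formula written out for clarity, not from the original file):
#   F(u, v) = sum over ny, nx of f(ny, nx) * exp(-2j*pi*(u*nx/Nx + v*ny/Ny))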
def dft(img):
trans_img = np.zeros(img.shape, dtype=np.complex)
height = img.shape[0]
width = img.shape[1]
for h in tnrange(height, desc="Height Loop"):
for w in range(width):
trans_img[h][w] = calc(img, w, h)
return trans_img
xoff = 280
yoff = 0
size = 64
min_grey_img = grey_img[yoff:yoff+size, xoff:xoff+size]
plti(min_grey_img)
transed_img = dft(min_grey_img)
plti(abs(transed_img))
abs_transed_img = abs(transed_img)
np.argmax(abs_transed_img)
abs_transed_img = abs_transed_img / abs_transed_img[0][0]
plti(abs_transed_img)
def idft(F):
f = np.zeros(F.shape, dtype=np.complex)
N = F.shape[0]
M = F.shape[1]
for h in tnrange(N, desc="Height Loop"):
for w in tnrange(M, desc = "Width Loop"):
f[h][w] = icalc(F, w, h)
return f
def icalc(F, x, y):
N = F.shape[0]
M = F.shape[1]
value = 0
for v in range(N):
for u in range(M):
value += F[v][u] * np.exp(1j * PI2 * (u/M *x + v/N *y))
return value / M / N
test = idft(transed_img)
``` |
{
"source": "18419NakamuraTemma/android-yolov3-linealarm",
"score": 2
} |
#### File: main/python/line.py
```python
from linebot import LineBotApi
from linebot.models import TextSendMessage
def send_message(message):
CHANNEL_ACCESS_TOKEN = 'Line Access Token'
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
messages = TextSendMessage(text=message)
line_bot_api.broadcast(messages=messages)
return message
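# Illustrative call (requires a valid token in CHANNEL_ACCESS_TOKEN; message text assumed):
#   send_message("Line alarm: object crossed the line")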
``` |
{
"source": "184451j-nyp/MaliciousURL_Classification",
"score": 3
} |
#### File: 184451j-nyp/MaliciousURL_Classification/malicious_url_main.py
```python
from os.path import isfile
from joblib import load
import utility as ut
model_filename = 'malicious_url_model.pickle'
csv_filename = 'complete_dataset.csv'
def main():
if not isfile(model_filename):
print('Waiting for model to load...')
ut.train_model(csv_filename, model_filename)
clf = load(model_filename)
link = ut.tokenize_link(input('Enter link to see if it is malicious: '))
prediction = clf.predict(link)
print(f'This URL is {prediction[0]}')
if __name__ == '__main__':
main()
``` |
{
"source": "18461271/state_farm_driver_distraction_detection",
"score": 3
} |
#### File: 18461271/state_farm_driver_distraction_detection/utils.py
```python
from keras.preprocessing import image
import numpy as np
import bcolz
import itertools
from keras.utils.np_utils import to_categorical
gen_t1 = image.ImageDataGenerator(rotation_range=15, height_shift_range=0.05,
shear_range=0.1, channel_shift_range=20, width_shift_range=0.1)
#gen_t1 loss=0.6993, accuracy: 79.3299%, 5 epochs,
#gen_t1 loss=0.7558, accuracy: 80.3646%, 20 epochs,
#gen_t1 loss=0.8266, accuracy: 79.7241%, 20 epochs,
gen_t2 = image.ImageDataGenerator(rescale=1./255,featurewise_center=True,rotation_range=15,featurewise_std_normalization=True,
height_shift_range=0.05,width_shift_range=.1,
shear_range=0.1, channel_shift_range=20)
def get_batches(dirname, gen=image.ImageDataGenerator(), shuffle=True, batch_size=4, class_mode='categorical',
target_size=(224,224)):
return gen.flow_from_directory(dirname, target_size=target_size,
class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
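# Illustrative usage (directory layout assumed, not part of the original file):
#   train_batches = get_batches('data/keras_train_batch', gen_t1, batch_size=32)
#   val_batches   = get_batches('data/keras_valid_batch', shuffle=False, batch_size=32)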
def get_classes(path):
train_batches = get_batches(path+'keras_train_batch', shuffle=False, batch_size=1)
val_batches = get_batches(path+'keras_valid_batch', shuffle=False, batch_size=1)
test_batches = get_batches(path+'test', shuffle=False, batch_size=1)
return (val_batches.classes, train_batches.classes, onehot(val_batches.classes), onehot(train_batches.classes),
val_batches.filenames, train_batches.filenames, test_batches.filenames)
def get_data(path, target_size=(224,224)):
batches = get_batches(path, shuffle=False, batch_size=1, class_mode=None, target_size=target_size)
    steps_per_epoch=len(batches) # the original referenced an undefined train_batches here
return np.concatenate([batches.next() for i in range(steps_per_epoch)])
def save_array(fname, arr):
c=bcolz.carray(arr, rootdir=fname, mode='w')
c.flush()
def load_array(fname):
return bcolz.open(fname)[:]
def onehot(x):
return to_categorical(x)
``` |
{
"source": "18463105800/ssd.pruning.pytorch",
"score": 2
} |
#### File: ssd.pruning.pytorch/data/weishi.py
```python
from .config import HOME
import pickle
import os
import os.path as osp
import sys
import torch
import torch.utils.data as data
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader
# OpenCL may be enabled by default in OpenCV3;
# disable it because it's not thread safe and causes unwanted GPU memory allocations
cv2.ocl.setUseOpenCL(False)
import numpy as np
from .weishi_eval import weishi_eval
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
# This list will be updated once WeishiAnnotationTransform() is called
WEISHI_CLASSES = ( '__background__',# always index 0
'null', 'face', 'clothes', 'trousers', 'bag', 'shoes', 'glasses', 'dog', 'cat', 'fish',
'monkey', 'rabbit', 'bird', 'lobster', 'dolphin', 'panda', 'sheep', 'tiger', 'penguin',
'turtle', 'lizard', 'snake', 'elephant', 'parrot', 'hamster', 'marmot', 'horse', 'hedgehog',
'squirrel', 'chicken', 'guitar', 'piano', 'cello_violin', 'saxophone', 'guzheng', 'drum_kit',
'electronic_organ', 'pipa', 'erhu', 'bike', 'car', 'airplane', 'motorcycle', 'strawberry',
'banana', 'lemon', 'pig_peggy', 'dead_fish', 'pikachu', 'iron_man', 'spider_man',
'cell_phone', 'cake', 'cup', 'fountain', 'balloon', 'billards')
class WeishiAnnotationTransform(object):
"""Transforms a Weishi annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, label_file_path = None, class_to_ind=None, keep_difficult=False):
# change WEISHI_CLASSES if necessary
if label_file_path is not None:
global WEISHI_CLASSES # declare that WEISHI_CLASSES is changed globally by this function
WEISHI_CLASSES = list()
fin = open(label_file_path, 'r')
for line in fin.readlines():
line = line.strip()
WEISHI_CLASSES.append(line)
fin.close()
WEISHI_CLASSES = tuple(WEISHI_CLASSES)
self.class_to_ind = class_to_ind or dict(
zip(WEISHI_CLASSES, range(len(WEISHI_CLASSES))))
self.keep_difficult = keep_difficult
def __call__(self, target, width, height):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element.
target has been ET.Element type already when being passed inside
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = []
for obj in target.iter('object'):
difficult = int(obj.find('difficult').text) == 1
if not self.keep_difficult and difficult:
continue
name = obj.find('name').text.lower().strip()
bbox = obj.find('bndbox')
pts = ['xmin', 'ymin', 'xmax', 'ymax']
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res += [bndbox] # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
return res # [[xmin, ymin, xmax, ymax, label_ind], ... ]
class WeishiDetection(data.Dataset):
"""Weishi Detection Dataset Object
input is image, target is annotation
Arguments:
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
_annopath: path for a specific annotation, extract from .txt file later
_imgpath: path for a specific image, extract from .txt file later
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(self, root,
image_xml_path="input (jpg, xml) file lists",
label_file_path = None,
transform=None,
dataset_name='WEISHI'):
target_transform=WeishiAnnotationTransform(label_file_path)
self.root = root # used to store detection results
self.transform = transform
self.target_transform = target_transform
self._annopath = {}
self._imgpath = {}
self.name = dataset_name
# below two args are for evaluation dataset
self.image_xml_path = image_xml_path
self.ids = list() # store the names for each image, not useful in WEISHI dataset
fin = open(image_xml_path, "r")
count = 0
for line in fin.readlines():
line = line.strip()
des = line.split(' ')
self._annopath[count] = des[1]
self._imgpath[count] = des[0]
self.ids.append(str(count)) # assign unique id for each image for tracking detection results later
count = count + 1
fin.close()
def __getitem__(self, index):
im, gt, h, w = self.pull_item(index)
return im, gt
def __len__(self):
return len(self._imgpath)
def pull_item(self, index):
target = ET.parse(self._annopath[index]).getroot()
img = cv2.imread(self._imgpath[index])
height, width, channels = img.shape
if self.target_transform is not None:
target = self.target_transform(target, width, height)
if self.transform is not None:
target = np.array(target)
img, boxes, labels = self.transform(img, target[:, :4], target[:, 4])
# to rgb
img = img[:, :, (2, 1, 0)]
# img = img.transpose(2, 0, 1)
target = np.hstack((boxes, np.expand_dims(labels, axis=1)))
return torch.from_numpy(img).permute(2, 0, 1), target, height, width
# return torch.from_numpy(img), target, height, width
def pull_image(self, index):
'''Returns the original image object at index in PIL form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
PIL img
'''
return cv2.imread(self._imgpath[index], cv2.IMREAD_COLOR)
def pull_anno(self, index):
'''Returns the original annotation of image at index
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to get annotation of
Return:
list: [img_id, [(label, bbox coords),...]]
eg: ('001718', [('dog', (96, 13, 438, 332))])
'''
anno = ET.parse(self._annopath[index]).getroot()
gt = self.target_transform(anno, 1, 1)
return self.ids[index], gt
def pull_tensor(self, index):
'''Returns the original image at an index in tensor form
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
tensorized version of img, squeezed
'''
return torch.Tensor(self.pull_image(index)).unsqueeze_(0)
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
# write down the detection results
self._write_weishi_results_file(all_boxes)
# after getting the result file, do evaluation and store in output_dir
aps, map = self._do_python_eval(output_dir)
return aps, map
def _get_weishi_results_file_template(self):
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'weishi_det_test' + '_{:s}.txt'
filedir = os.path.join(self.root, 'results')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_weishi_results_file(self, all_boxes):
for cls_ind, cls in enumerate(WEISHI_CLASSES):
cls_ind = cls_ind
if cls == '__background__':
continue
print('Writing {} WEISHI results file'.format(cls))
filename = self._get_weishi_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.ids):
index = index # not index[1]
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
# for a class in an image: {image_id} {score} {xcor} {xcor} {ycor} {ycor}
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
rootpath = self.root
cachedir = os.path.join(self.root, 'annotations_cache')
aps = []
# Similar to VOC
use_07_metric = True
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(WEISHI_CLASSES):
if cls == '__background__':
continue
filename = self._get_weishi_results_file_template().format(cls)
# self is dataset
rec, prec, ap = weishi_eval(filename, self, \
cls, cachedir, ovthresh=0.5, use_07_metric=use_07_metric)
# AP = AVG(Precision for each of 11 Recalls's precision)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
if output_dir is not None:
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
# MAP = AVG(AP for each object class)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
return aps, np.mean(aps)
```
#### File: ssd.pruning.pytorch/models/mobilenetv2.py
```python
import torch.nn as nn
import math
## kernel_size=(3, 3) pad = 1
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
## kernel_size=(1, 1) stride = 1 pad = 0 increase / reduce dims
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
self.use_res_connect = self.stride == 1 and inp == oup # using residual connect when input_channel = output_channel
self.oup = oup # for SSD multibox
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, 3, stride, 1, groups=inp * expand_ratio, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1.): # width_mult also known as depth multiplier
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.interverted_residual_setting = [
# t, c, n, s # t: expand_ratio, c:, n: how many blocks of this type in total, s: stride
[1, 16, 1, 1],
[6, 24, 2, 2], # 2nd halfing
[6, 32, 3, 2], # 3rd halfing
[6, 64, 4, 2], # 4th halfing
[6, 96, 3, 1],
[6, 160, 3, 1], # 5th halfing is disabled for detection
# [6, 160, 3, 2], # 5th halfing
[6, 320, 1, 1],
] # 1 + 2 + 3 + 4 + 3 + 3 + 1 = 17 blocks in total
# building first layer
input_channel = int(32 * width_mult)
self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
# the first block is different: uses a regular 3×3 convolution with 32 channels instead of the expansion layer
self.features = [conv_bn(3, input_channel, 2)] # 1st halfing
# building inverted residual blocks
for t, c, n, s in self.interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(InvertedResidual(input_channel, output_channel, s, t))
else:
self.features.append(InvertedResidual(input_channel, output_channel, 1, t))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel)) # after this layer, connect to extra layers in SSD
self.features.append(nn.AvgPool2d(int(input_size/32))) # don't used in backbone
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier, this won't be used in backbone
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.last_channel, n_class),
)
def forward(self, x):
x = self.features(x)
x = x.view(-1, self.last_channel)
x = self.classifier(x)
return x
def mobilev2_layers(self):
# return a list containing conv_bn or InvertedResidual object one by one
layers = []
layers += self.features.children()
return layers
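# Minimal instantiation sketch (values assumed, guarded so importing this module has no side effects):
if __name__ == '__main__':
    net = MobileNetV2(n_class=1000, input_size=224, width_mult=0.5)  # 0.5x-width backbone
    print(len(net.mobilev2_layers()))  # number of blocks available as an SSD base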
```
#### File: ssd.pruning.pytorch/models/SSD_mobile.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from layers import *
from data import voc, coco, xl #from config.py
from .backbones import mobilenetv1, mobilenetv2
from .mobilenetv2 import InvertedResidual
import os
# inherit nn.Module so it have .train()
class SSD_MobN1(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base MobileNetV1 followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
size: input image size
base: MobileNet v1 layers for input, size of either 300 or 500
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
max_per_image: same as top_k, used in Detection, keep 200 detections per image by default
"""
def __init__(self, phase, size, base, extras, head, num_classes, cfg, max_per_image):
super(SSD_MobN1, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = cfg
self.priorbox = PriorBox(self.cfg)
# just create an object above, but need to call forward() to return prior boxes coords
self.priors = Variable(self.priorbox.forward(), volatile=True)
self.size = size
# SSD network
self.base = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(256, 20) # L2Norm(512, 20), 256 for mobilenetv1
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])#loc conv layer
self.conf = nn.ModuleList(head[1])#conf conv layer
#if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, self.cfg, max_per_image, 0.01, 0.45)
def forward(self, x, test=False):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
For each default box, predict both the shape offsets and the confidences for all object categories
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4] #variance???
"""
sources = list()# used for storing output of chosen layers, just concat them together
loc = list()
conf = list()
# apply mobilenet v1 to index 5
for k in range(6):
x = self.base[k](x)
s = self.L2Norm(x)#just a kind of normalization
sources.append(s)
# apply mobilenet v1 right before avg pool (the last layer in self.features)
for k in range(6, len(self.base) - 1): # 14
x = self.base[k](x)
sources.append(x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
# l and c is two conv layers
loc.append(l(x).permute(0, 2, 3, 1).contiguous()) # store the output of loc conv layer
conf.append(c(x).permute(0, 2, 3, 1).contiguous()) # store the output of conf conv layer
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
#if self.phase == "test":
if test:
output = self.detect(
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds
self.priors.type(type(x.data)) # default boxes
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# inherit nn.Module so it have .train()
class SSD_MobN2(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base MobileNetV2 followed by the
added multibox conv layers. Each multibox layer branches into
"""
def __init__(self, phase, size, base, extras, head, num_classes, cfg, max_per_image):
super(SSD_MobN2, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = cfg
self.priorbox = PriorBox(self.cfg)
# just create an object above, but need to call forward() to return prior boxes coords
self.priors = Variable(self.priorbox.forward(), volatile=True)
self.size = size
# SSD network
self.base = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.L2Norm = L2Norm(32, 20) # 512
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])#loc conv layer
self.conf = nn.ModuleList(head[1])#conf conv layer
#if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, self.cfg, max_per_image, 0.01, 0.45)
def forward(self, x, test=False):
"""Applies network layers and ops on input image(s) x.
"""
sources = list()# used for storing output of chosen layers, just concat them together
loc = list()
conf = list()
# apply mobilenet v2 to index 6
for k in range(7):
x = self.base[k](x)
s = self.L2Norm(x)#just a kind of normalization
sources.append(s)
        # apply mobilenet v2 up to the layer right before the avg pool (the last layer in self.features)
for k in range(7, len(self.base) - 1): # 19
x = self.base[k](x)
sources.append(x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
# apply multibox head to source layers
for (x, l, c) in zip(sources, self.loc, self.conf):
# l and c is two conv layers
loc.append(l(x).permute(0, 2, 3, 1).contiguous()) # store the output of loc conv layer
conf.append(c(x).permute(0, 2, 3, 1).contiguous()) # store the output of conf conv layer
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
#if self.phase == "test":
if test:
output = self.detect(
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(conf.size(0), -1, self.num_classes)), # conf preds
self.priors.type(type(x.data)) # default boxes
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
self.priors
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def add_extras(cfg, i, batch_norm=False):
# Extra layers added to mobileNet for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def mob1_multibox(mob1, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
mob1_source = [5, -2] #two con_dw
for k, v in enumerate(mob1_source):
# conv2d (in_channels, out_channels, kernel_size, stride, padding)
loc_layers += [nn.Conv2d(mob1[v][3].out_channels, #[3] is the last conv within conv_dw
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(mob1[v][3].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):# start k from 2
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return mob1, extra_layers, (loc_layers, conf_layers)
def mob2_multibox(mob2, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
mob2_source = [6, -2] #one InvertedResidual, one conv_1x1_bn
for k, v in enumerate(mob2_source):
if isinstance(mob2[v], InvertedResidual):
out_channels = mob2[v].oup # object InvertedResidual
else:
out_channels = mob2[v][0].out_channels # conv_1x1_bn
# conv2d (in_channels, out_channels, kernel_size, stride, padding)
loc_layers += [nn.Conv2d(out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):# start k from 2
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return mob2, extra_layers, (loc_layers, conf_layers)
# these dicts describe how the extra layers and multibox heads are built, entry by entry
extras = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
'300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location
'512': [],
}
def build_mssd(phase, cfg, size=300, num_classes=21, base='m1', max_per_image = 200, width_mult = 1.):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
if size != 300:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only SSD300 (size=300) is supported!")
return
# str(size) will change 300 to '300'
if base =='m2':
base_, extras_, head_ = mob2_multibox(mobilenetv2(width_mult),
add_extras(extras[str(size)], int(1280 * width_mult) if width_mult > 1.0 else 1280),
mbox[str(size)], num_classes)
return SSD_MobN2(phase, size, base_, extras_, head_, num_classes, cfg, max_per_image)
else:
base_, extras_, head_ = mob1_multibox(mobilenetv1(),
add_extras(extras[str(size)], 1024),
mbox[str(size)], num_classes)
return SSD_MobN1(phase, size, base_, extras_, head_, num_classes, cfg, max_per_image)
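# --- Illustrative usage (added sketch, not part of the original file) ---
# Constructing the MobileNet-v2 variant for training; `prior_cfg` stands in for
# whatever prior-box configuration dict the surrounding project defines, so this
# line only illustrates the call signature:
# net = build_mssd('train', prior_cfg, size=300, num_classes=21, base='m2')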
```
#### File: ssd.pruning.pytorch/pruning/prune_vgg_tools.py
```python
import torch
from torch.autograd import Variable
from torchvision import models
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in DataLoader
# OpenCL may be enabled by default in OpenCV3;
# disable it because it's not thread safe and causes unwanted GPU memory allocations
cv2.ocl.setUseOpenCL(False)
import sys
import numpy as np
def replace_layers(model, i, indexes, layers):
if i in indexes:
# layers and indexes store new layers used to update old layers
return layers[indexes.index(i)]
# if i not in indexes, use old layers
return model[i]
'''
--------------------------------------------------------------------------------
1. Prune conv layers in vgg with/without BN (only layers stored in model.base are supported for now)
Args:
model: model for pruning
layer_index: index the pruned layer's location within model
cut_ratio: the fraction of filters to prune from this layer (e.g. 0.2 removes the 20% of filters with the smallest summed weights)
Adapted from: https://github.com/jacobgil/pytorch-pruning
'''
def prune_conv_layer(model, layer_index, cut_ratio=0.2, use_bn = False):
_, conv = list(model.base._modules.items())[layer_index]
if use_bn:
_, old_bn = list(model.base._modules.items())[layer_index + 1]
next_conv = None
offset = 1
# search for the next conv, based on current conv with id = (layer_index, filter_index)
while layer_index + offset < len(model.base._modules.items()):
res = list(model.base._modules.items())[layer_index+offset] # name, module
if isinstance(res[1], torch.nn.modules.conv.Conv2d):
next_name, next_conv = res
break
offset = offset + 1
if next_conv is None:
print("No filter will be prunned for this layer (last layer)")
return model
num_filters = conv.weight.data.size(0) # out_channels x in_channels x 3 x 3
# skip the layer with only one filter left
if num_filters <= 1:
print("No filter will be prunned for this layer (num_filters<=1)")
return model
cut = int(cut_ratio * num_filters)
if cut < 1:
print("No filter will be prunned for this layer (cut<1)")
return model
if (num_filters - cut) < 1:
print("No filter will be prunned for this layer (no filter left after cutting)")
return model
# rank the filters within this layer and store into filter_ranks
abs_wgt = torch.abs(conv.weight.data)
values = \
torch.sum(abs_wgt, dim = 1, keepdim = True).\
sum(dim=2, keepdim = True).sum(dim=3, keepdim = True)[:, 0, 0, 0]# .data
# Normalize the summed absolute weights by the filter dimensions (in_channels x kH x kW)
values = values / (abs_wgt.size(1) * abs_wgt.size(2) * abs_wgt.size(3)) # (filter_number for this layer, 1)
print("Ranking filters.. ")
filters_to_prune = np.argsort(values.cpu().numpy())[:cut] # order from smallest to largest
print("Filters that will be prunned", filters_to_prune)
print("Pruning filters.. ")
# the updated conv for current conv, with cut output channels being pruned
new_conv = \
torch.nn.Conv2d(in_channels = conv.in_channels, \
out_channels = conv.out_channels - cut,
kernel_size = conv.kernel_size, \
stride = conv.stride,
padding = conv.padding,
dilation = conv.dilation,
groups = conv.groups,
bias = conv.bias is not None) #(out_channels)
old_weights = conv.weight.data.cpu().numpy() # (out_channels, in_channels, kernel_size[0], kernel_size[1])
new_weights = new_conv.weight.data.cpu().numpy()
# skip that filter's weight inside old_weights and store others into new_weights
new_weights = np.delete(old_weights, filters_to_prune, axis = 0)
new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if conv.bias is not None: # copy the surviving biases if this conv layer has a bias term
bias_numpy = conv.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune, axis = None)
new_conv.bias.data = torch.from_numpy(bias).cuda()
# BatchNorm modification TODO: Extract this function outside as a separate func.
if use_bn:
new_bn = torch.nn.BatchNorm2d(num_features=new_conv.out_channels, eps=old_bn.eps, momentum=old_bn.momentum, affine=old_bn.affine)
# old_bn.affine == True, need to copy learning gamma and beta to new_bn
# gamma: size = (num_features)
old_weights = old_bn.weight.data.cpu().numpy()
new_weights = new_bn.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune)
new_bn.weight.data = torch.from_numpy(new_weights).cuda()
# beta: size = (num_features)
bias_numpy = old_bn.bias.data.cpu().numpy()
# change size to (out_channels - cut)
bias = np.zeros(shape = (bias_numpy.shape[0] - cut), dtype = np.float32)
bias = np.delete(bias_numpy, filters_to_prune)
new_bn.bias.data = torch.from_numpy(bias).cuda()
# next_conv must exist
next_new_conv = \
torch.nn.Conv2d(in_channels = next_conv.in_channels - cut,\
out_channels = next_conv.out_channels, \
kernel_size = next_conv.kernel_size, \
stride = next_conv.stride,
padding = next_conv.padding,
dilation = next_conv.dilation,
groups = next_conv.groups,
bias = next_conv.bias is not None)
old_weights = next_conv.weight.data.cpu().numpy()
new_weights = next_new_conv.weight.data.cpu().numpy()
new_weights = np.delete(old_weights, filters_to_prune, axis = 1)
next_new_conv.weight.data = torch.from_numpy(new_weights).cuda()
if next_conv.bias is not None:
next_new_conv.bias.data = next_conv.bias.data
if use_bn:
# BatchNorm modification
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index, layer_index+1, layer_index+offset], \
[new_conv, new_bn, next_new_conv]) for i, _ in enumerate(model.base)))
del old_bn
else:
# replace current layer and next_conv with new_conv and next_new_conv respectively
base = torch.nn.Sequential(
*(replace_layers(model.base, i, [layer_index, layer_index+offset], \
[new_conv, next_new_conv]) for i, _ in enumerate(model.base)))
del model.base # delete and replace with brand new one
del conv
model.base = base
message = str(100*float(cut) / num_filters) + "%"
print("Filters prunned", str(message))
return model
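# --- Illustrative usage (added sketch, not part of the original file) ---
# A minimal, self-contained demonstration of prune_conv_layer on a toy module
# whose conv layers live in `model.base`, mirroring the structure the function
# expects. The toy network below is an assumption made only for illustration,
# and the .cuda() calls inside prune_conv_layer mean a GPU is needed to run it.
if __name__ == "__main__":
    class _ToyNet(torch.nn.Module):
        def __init__(self):
            super(_ToyNet, self).__init__()
            self.base = torch.nn.Sequential(
                torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
                torch.nn.ReLU(inplace=True),
                torch.nn.Conv2d(8, 16, kernel_size=3, padding=1),
            )

    toy = _ToyNet()
    # prune 25% of the filters of the first conv; the second conv's input
    # channels are shrunk to match by the function above
    toy = prune_conv_layer(toy, layer_index=0, cut_ratio=0.25, use_bn=False)
    print(toy.base)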
``` |
{
"source": "1847123212/CircuitPython_GC9A01_demos",
"score": 2
} |
#### File: CircuitPython_GC9A01_demos/examples/gc9a01_gauge_knob.py
```python
import time
import math
import board
import busio
import displayio
import bitmaptools
import terminalio
from analogio import AnalogIn
import adafruit_imageload
from adafruit_display_text import label
import gc9a01
# change these as you like, keep the pointer center at 15,105
dial_background_filename = '/imgs/dial-background.bmp'
pointer_filename = '/imgs/pointer-red-basic-30x140-c15x105.bmp'
legend_text = "PERCENT\nAWESOME"
displayio.release_displays()
import os
board_type = os.uname().machine
if 'QT Py M0' in board_type or 'QT Py RP2040' in board_type:
# QT Py pinout
tft_clk = board.SCK
tft_mosi = board.MOSI
tft_rst = board.TX
tft_dc = board.RX
tft_cs = board.A3
tft_bl = board.A2 # optional
spi = busio.SPI(clock=tft_clk, MOSI=tft_mosi)
elif 'ItsyBitsy M4' in board_type:
tft_clk = board.SCK
tft_mosi = board.MOSI
tft_rst = board.MISO
tft_dc = board.D2
tft_cs = board.A5
tft_bl = board.A3 # optional
spi = busio.SPI(clock=tft_clk, MOSI=tft_mosi)
elif 'Pico' in board_type:
# # one pinout, on "southeast" side of Pico board
# tft_clk = board.GP18
# tft_mosi= board.GP19
# tft_rst = board.GP20
# tft_dc = board.GP16
# tft_cs = board.GP17
# tft_bl = board.GP21
# spi = busio.SPI(clock=tft_clk, MOSI=tft_mosi)
# another pinout, on "southwest" of Pico board
tft_clk = board.GP10
tft_mosi= board.GP11
tft_rst = board.GP12
tft_dc = board.GP13
tft_cs = board.GP14
tft_bl = board.GP15
spi = busio.SPI(clock=tft_clk, MOSI=tft_mosi)
# Analog knob to control dial
analog_in = AnalogIn(board.A1)
# Create displayio bus and display
display_bus = displayio.FourWire(spi, command=tft_dc, chip_select=tft_cs, reset=tft_rst)
display = gc9a01.GC9A01(display_bus, width=240, height=240,
backlight_pin=tft_bl, auto_refresh=False)
# Create main display group and add it to the display
main = displayio.Group()
display.show(main)
# 240x240 dial background
bg_bitmap,bg_pal = adafruit_imageload.load(dial_background_filename)
bg_tile_grid = displayio.TileGrid(bg_bitmap, pixel_shader=bg_pal)
main.append(bg_tile_grid)
# Text legend
text_area = label.Label(terminalio.FONT, text=legend_text, line_spacing=0.9, color=0x000000, anchor_point=(0.5,0.5), anchored_position=(0,0))
text_group = displayio.Group(scale=1, x=120, y=155)
text_group.append(text_area)
main.append(text_group) # Subgroup for text scaling
# 30x140 pointer
bitmap_pointer, palette_pointer = adafruit_imageload.load(pointer_filename, bitmap=displayio.Bitmap,palette=displayio.Palette)
palette_pointer.make_transparent(0)
# Blank bitmap the same size as the pointer bitmap
bitmap_pointer_blank = displayio.Bitmap(bitmap_pointer.width, bitmap_pointer.height, 1)# len(palette_pointer))
#bitmap_pointer_blank.fill(0)
# Transparent overlay that is "scribbled" into by rotozoom
# to create rotated version of pointer
bitmap_scribble = displayio.Bitmap(display.width, display.height, len(palette_pointer))
tile_grid = displayio.TileGrid(bitmap_scribble, pixel_shader=palette_pointer)
main.append(tile_grid)
# Do initial draw
display.refresh()
print("Hello World!")
# simple range mapper, like Arduino map()
def map_range(s, a, b):
(a1, a2), (b1, b2) = a, b
return b1 + ((s - a1) * (b2 - b1) / (a2 - a1))
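# Added example note: map_range(0.25, (0.0, 1.0), (-2.6, 2.6)) returns -1.3,
# i.e. the value is linearly rescaled from the input interval onto the output one.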
# for dials like 'dial-percenti.bmp', the sweep from 0% to 100%
# maps to roughly -2.6 .. +2.6 radians (see percent_to_theta below)
def percent_to_theta(p):
return map_range(p, (0.0,1.0), (-2.6, 2.6) )
percent = 0.0
last_time = time.monotonic()
while True:
percent = map_range( analog_in.value, (200,65400), (1,0))
theta = percent_to_theta(percent)
print("dt:",time.monotonic()-last_time,"theta:", theta, int(percent*100))
last_time = time.monotonic()
# erasing the entire bitmap is slow (~1fps, because of transparency I think)
# instead we erase just the region we modified, after refresh below
# bitmap_scribble.fill(0)
# offset rotation point (15,105) for bitmap_pointer's axis of rotation
bitmaptools.rotozoom( bitmap_scribble, bitmap_pointer, angle = theta, px=15,py=105)
display.refresh()
# after refresh, now "erase" the rotated pointer by doing a
# rotozoom of a "blank" bitmap with only transparency
bitmaptools.rotozoom( bitmap_scribble, bitmap_pointer_blank, angle = theta, px=15,py=105)
``` |
{
"source": "1847123212/JMCUProgFast",
"score": 2
} |
#### File: 1847123212/JMCUProgFast/jlink.py
```python
import time
import ctypes
class JLink(object):
def __init__(self, dllpath, coretype):
self.jlk = ctypes.cdll.LoadLibrary(dllpath)
err_buf = (ctypes.c_char * 64)()
self.jlk.JLINKARM_ExecCommand('Device = %s' %coretype, err_buf, 64)
self.jlk.JLINKARM_TIF_Select(1)
self.jlk.JLINKARM_SetSpeed(12000)
self.jlk.JLINKARM_Reset()
def write_U32(self, addr, val):
self.jlk.JLINKARM_WriteU32(addr, val)
def write_U16(self, addr, val):
self.jlk.JLINKARM_WriteU16(addr, val)
def read_U32(self, addr):
buf = (ctypes.c_uint32 * 1)()
self.jlk.JLINKARM_ReadMemU32(addr, 1, buf, 0)
return buf[0]
def write_mem(self, addr, data):
if type(data) == list: data = ''.join([chr(x) for x in data])
buf = ctypes.create_string_buffer(data)
self.jlk.JLINKARM_WriteMem(addr, len(data), buf)
def read_mem(self, addr, size):
buf = ctypes.create_string_buffer(size)
self.jlk.JLINKARM_ReadMem(addr, size, buf)
return buf
def write_mem_U32(self, addr, data):
byte = []
for x in data:
byte.extend([x&0xFF, (x>>8)&0xFF, (x>>16)&0xFF, (x>>24)&0xFF])
self.write_mem(addr, byte)
NVIC_AIRCR = 0xE000ED0C
NVIC_AIRCR_VECTKEY = (0x5FA << 16)
NVIC_AIRCR_VECTRESET = (1 << 0)
NVIC_AIRCR_SYSRESETREQ = (1 << 2)
def reset(self, hardware_reset=False):
if hardware_reset:
raise NotImplementedError()
else:
try:
self.write_U32(self.NVIC_AIRCR, self.NVIC_AIRCR_VECTKEY | self.NVIC_AIRCR_SYSRESETREQ)
except Exception:
pass
def halt(self):
self.jlk.JLINKARM_Halt()
def go(self):
self.jlk.JLINKARM_Go()
CORE_REGISTER = {
'r0' : 0,
'r1' : 1,
'r2' : 2,
'r3' : 3,
'r4' : 4,
'r5' : 5,
'r6' : 6,
'r7' : 7,
'r8' : 8,
'r9' : 9,
'r10' : 10,
'r11' : 11,
'r12' : 12,
'sp' : 13,
'r13' : 13,
'lr' : 14,
'r14' : 14,
'pc' : 15,
'r15' : 15,
'xpsr': 16,
}
def write_reg(self, reg, val):
self.jlk.JLINKARM_WriteReg(self.CORE_REGISTER[reg], val)
def read_reg(self, reg):
return self.jlk.JLINKARM_ReadReg(self.CORE_REGISTER[reg])
###################################################
TARGET_RUNNING = 1 # Core is executing code.
TARGET_HALTED = 2 # Core is halted in debug mode.
TARGET_RESET = 3 # Core is being held in reset.
TARGET_SLEEPING = 4 # Core is sleeping due to a wfi or wfe instruction.
TARGET_LOCKUP = 5 # Core is locked up.
# Debug Halting Control and Status Register
DHCSR = 0xE000EDF0
C_DEBUGEN = (1 << 0)
C_HALT = (1 << 1)
C_STEP = (1 << 2)
C_MASKINTS = (1 << 3)
C_SNAPSTALL = (1 << 5)
S_REGRDY = (1 << 16)
S_HALT = (1 << 17)
S_SLEEP = (1 << 18)
S_LOCKUP = (1 << 19)
S_RETIRE_ST = (1 << 24)
S_RESET_ST = (1 << 25)
# Debug Exception and Monitor Control Register
DEMCR = 0xE000EDFC
DEMCR_TRCENA = (1 << 24)
DEMCR_VC_HARDERR = (1 << 10)
DEMCR_VC_BUSERR = (1 << 8)
DEMCR_VC_CORERESET = (1 << 0)
DBGKEY = (0xA05F << 16)
def getState(self):
dhcsr = self.read_U32(self.DHCSR)
if dhcsr & self.S_RESET_ST:
newDhcsr = self.read_U32(self.DHCSR)
if (newDhcsr & self.S_RESET_ST) and not (newDhcsr & self.S_RETIRE_ST):
return self.TARGET_RESET
if dhcsr & self.S_LOCKUP:
return self.TARGET_LOCKUP
elif dhcsr & self.S_SLEEP:
return self.TARGET_SLEEPING
elif dhcsr & self.S_HALT:
return self.TARGET_HALTED
else:
return self.TARGET_RUNNING
def isRunning(self):
return self.getState() == self.TARGET_RUNNING
def isHalted(self):
return self.getState() == self.TARGET_HALTED
def setTargetState(self, state):
if state == "PROGRAM":
self.resetStopOnReset()
# Write the thumb bit in case the reset handler points to an ARM address
self.write_reg('xpsr', 0x1000000)
def resetStopOnReset(self):
"""perform a reset and stop the core on the reset handler"""
self.halt()
demcr = self.read_U32(self.DEMCR)
self.write_U32(self.DEMCR, demcr | self.DEMCR_VC_CORERESET) # enable the vector catch
self.reset()
self.waitReset()
while self.isRunning(): pass
self.write_U32(self.DEMCR, demcr)
def waitReset(self):
''' Now wait for the system to come out of reset '''
startTime = time.time()
while time.time() - startTime < 2.0:
try:
dhcsr = self.read_U32(self.DHCSR)
if (dhcsr & self.S_RESET_ST) == 0: break
except Exception: # the original referenced DAPAccess.TransferError, which is not imported in this module
time.sleep(0.01)
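# --- Illustrative usage (added sketch, not part of the original file) ---
# The DLL path and device name below are placeholders; a real SEGGER J-Link
# shared library and an attached target are required for these calls to work.
if __name__ == "__main__":
    jlk = JLink(r"C:\Program Files\SEGGER\JLink\JLinkARM.dll", "STM32F103C8")
    jlk.halt()
    print("DHCSR = 0x%08X" % jlk.read_U32(JLink.DHCSR))
    print("halted:", jlk.isHalted())
    jlk.go()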
``` |
{
"source": "1847123212/openmv-ide",
"score": 2
} |
#### File: 1847123212/openmv-ide/make.py
```python
import argparse, glob, multiprocessing, os, re, shutil, stat, sys, subprocess
def match(d0, d1):
x = [x for x in os.listdir(d0) if re.match(d1, x)]
return os.path.join(d0, x[0]) if x else None
def search(d0, d1):
x = [x for x in os.listdir(d0) if re.search(d1, x)]
return os.path.join(d0, x[0]) if x else None
def find_qtdir(rpi):
if rpi:
os.environ["QTDIR"] = rpi
path = os.path.join(rpi, "bin") + ':'
os.environ["PATH"] = path + os.environ["PATH"]
return rpi
elif sys.platform.startswith('win'):
qtdir = match(os.sep, r"Qt")
if qtdir:
qtdir = match(qtdir, r"\d\.\d(\.\d)?")
if qtdir:
qtdir = search(qtdir, r"mingw")
if qtdir:
os.environ["QTDIR"] = qtdir
path = ';' + os.path.join(qtdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return qtdir
elif sys.platform.startswith('darwin'):
qtdir = match(os.path.expanduser('~'), r"Qt")
if qtdir:
qtdir = match(qtdir, r"\d\.\d(\.\d)?")
if qtdir:
qtdir = search(qtdir, r"clang")
if qtdir:
os.environ["QTDIR"] = qtdir
path = ':' + os.path.join(qtdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return qtdir
elif sys.platform.startswith('linux'):
qtdir = match(os.path.expanduser('~'), r"Qt")
if qtdir:
qtdir = match(qtdir, r"\d\.\d(\.\d)?")
if qtdir:
qtdir = search(qtdir, r"gcc")
if qtdir:
os.environ["QTDIR"] = qtdir
path = ':' + os.path.join(qtdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return qtdir
return None
def find_mingwdir():
if sys.platform.startswith('win'):
mingwdir = match(os.sep, r"Qt")
if mingwdir:
mingwdir = match(mingwdir, r"Tools")
if mingwdir:
mingwdir = search(mingwdir, r"mingw")
if mingwdir:
os.environ["MINGWDIR"] = mingwdir
path = ';' + os.path.join(mingwdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return mingwdir
return None
def find_qtcdir():
if sys.platform.startswith('win'):
qtcdir = match(os.sep, r"Qt")
if qtcdir:
qtcdir = match(qtcdir, r"Tools")
if qtcdir:
qtcdir = match(qtcdir, r"QtCreator")
if qtcdir:
os.environ["QTCDIR"] = qtcdir
path = ';' + os.path.join(qtcdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return qtcdir
return None
def find_ifdir():
if sys.platform.startswith('win'):
ifdir = match(os.sep, r"Qt")
if ifdir:
ifdir = search(ifdir, r"QtIFW")
if ifdir:
os.environ["IFDIR"] = ifdir
path = ';' + os.path.join(ifdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return ifdir
elif sys.platform.startswith('darwin'):
ifdir = match(os.path.expanduser('~'), r"Qt")
if ifdir:
ifdir = search(ifdir, r"QtIFW")
if ifdir:
os.environ["IFDIR"] = ifdir
path = ':' + os.path.join(ifdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return ifdir
elif sys.platform.startswith('linux'):
ifdir = match(os.path.expanduser('~'), r"Qt")
if ifdir:
ifdir = search(ifdir, r"QtIFW")
if ifdir:
os.environ["IFDIR"] = ifdir
path = ':' + os.path.join(ifdir, "bin")
os.environ["PATH"] = os.environ["PATH"] + path
return ifdir
return None
def make():
__folder__ = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser(description =
"Make Script")
parser.add_argument("--rpi", nargs = '?',
help = "Cross Compile QTDIR for the Raspberry Pi")
parser.add_argument("-u", "--upload", nargs = '?',
help = "FTP Password")
args = parser.parse_args()
if args.rpi and not sys.platform.startswith('linux'):
sys.exit("Linux Only")
###########################################################################
cpus = multiprocessing.cpu_count()
qtdir = find_qtdir(args.rpi)
mingwdir = find_mingwdir()
qtcdir = find_qtcdir()
ifdir = find_ifdir()
builddir = os.path.join(__folder__, "build")
installdir = os.path.join(builddir, "install")
if not os.path.exists(builddir):
os.mkdir(builddir)
installer = ""
if args.rpi:
# Add Fonts...
if os.path.exists(os.path.join(installdir, "lib/Qt/lib/fonts")):
shutil.rmtree(os.path.join(installdir, "lib/Qt/lib/fonts"), ignore_errors = True)
shutil.copytree(os.path.join(__folder__, "dejavu-fonts/fonts/"),
os.path.join(installdir, "lib/Qt/lib/fonts"))
# Add README.txt...
with open(os.path.join(installdir, "README.txt"), 'w') as f:
f.write("Please run setup.sh to install OpenMV IDE dependencies... e.g.\n\n")
f.write("./setup.sh\n\n")
f.write("source ~/.bashrc\n\n")
f.write("./bin/openmvide.sh\n\n")
# Add setup.sh...
with open(os.path.join(installdir, "setup.sh"), 'w') as f:
f.write("#! /bin/sh\n\n")
f.write("sudo apt-get install -y libxcb* libGLES* libts* libsqlite* libodbc* libsybdb* libusb-1.0 python-pip\n")
f.write("sudo pip install pyusb\n\n")
f.write("sudo cp $( dirname \"$0\" )/share/qtcreator/pydfu/50-openmv.rules /etc/udev/rules.d/50-openmv.rules\n")
f.write("sudo udevadm control --reload-rules\n\n")
f.write("if [ -z \"${QT_QPA_PLATFORM}\" ]; then\n")
f.write(" echo >> ~/.bashrc\n")
f.write(" echo \"# Force Qt Apps to use xcb\" >> ~/.bashrc\n")
f.write(" echo \"export QT_QPA_PLATFORM=xcb\" >> ~/.bashrc\n")
f.write(" echo\n")
f.write(" echo Please type \"source ~/.bashrc\".\n")
f.write("fi\n\n")
os.chmod(os.path.join(installdir, "setup.sh"),
os.stat(os.path.join(installdir, "setup.sh")).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Build...
if os.system("cd " + builddir +
" && qmake ../qt-creator/qtcreator.pro -r" +
" && make -r -w -j" + str(cpus) +
" && make bindist INSTALL_ROOT="+installdir):
sys.exit("Make Failed...")
installer = glob.glob(os.path.join(builddir, "openmv-ide-*.tar.gz"))[0]
elif sys.platform.startswith('win'):
if os.system("cd " + builddir +
" && qmake ../qt-creator/qtcreator.pro -r -spec win32-g++" +
" && jom -j" + str(cpus) +
" && jom installer INSTALL_ROOT="+installdir + " IFW_PATH="+ifdir):
sys.exit("Make Failed...")
installer = glob.glob(os.path.join(builddir, "openmv-ide-*.exe"))[0]
elif sys.platform.startswith('darwin'):
if os.system("cd " + builddir +
" && qmake ../qt-creator/qtcreator.pro -r -spec macx-clang CONFIG+=x86_64" +
" && make -j" + str(cpus) +
" && make deployqt"):
sys.exit("Make Failed...")
os.system("cd " + builddir + " && make codesign SIGNING_IDENTITY=Application")
if os.system("cd " + builddir + " && make dmg"):
sys.exit("Make Failed...")
installer = glob.glob(os.path.join(builddir, "openmv-ide-*.dmg"))[0]
elif sys.platform.startswith('linux'):
# Add Fonts...
if os.path.exists(os.path.join(installdir, "lib/Qt/lib/fonts")):
shutil.rmtree(os.path.join(installdir, "lib/Qt/lib/fonts"), ignore_errors = True)
shutil.copytree(os.path.join(__folder__, "dejavu-fonts/fonts/"),
os.path.join(installdir, "lib/Qt/lib/fonts"))
# Add README.txt...
with open(os.path.join(installdir, "README.txt"), 'w') as f:
f.write("Please run setup.sh to install OpenMV IDE dependencies... e.g.\n\n")
f.write("./setup.sh\n\n")
f.write("./bin/openmvide.sh\n\n")
# Add setup.sh...
with open(os.path.join(installdir, "setup.sh"), 'w') as f:
f.write("#! /bin/sh\n\n")
f.write("sudo apt-get install -y libusb-1.0 python-pip\n")
f.write("sudo pip install pyusb\n\n")
f.write("sudo cp $( dirname \"$0\" )/share/qtcreator/pydfu/50-openmv.rules /etc/udev/rules.d/50-openmv.rules\n")
f.write("sudo udevadm control --reload-rules\n\n")
os.chmod(os.path.join(installdir, "setup.sh"),
os.stat(os.path.join(installdir, "setup.sh")).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
# Build...
if os.system("cd " + builddir +
" && qmake ../qt-creator/qtcreator.pro -r -spec linux-g++" +
" && make -r -w -j" + str(cpus) +
" && make installer INSTALL_ROOT="+installdir + " IFW_PATH="+ifdir):
sys.exit("Make Failed...")
installer = glob.glob(os.path.join(builddir, "openmv-ide-*.run"))[0]
else:
sys.exit("Unknown Platform")
###########################################################################
if args.upload:
remotedir = os.path.splitext(os.path.basename(installer))[0]
if args.rpi: # Remove .tar
remotedir = os.path.splitext(remotedir)[0]
uploaddir = os.path.join(builddir, remotedir)
if not os.path.exists(uploaddir):
os.mkdir(uploaddir)
shutil.copy2(installer, uploaddir)
subprocess.check_call(["python", "ftpsync.py", "-u", "-l",
"ftp://upload<EMAIL>:"+args.upload+"@ftp.<EMAIL>.io/"+remotedir,
uploaddir])
if __name__ == "__main__":
make()
``` |
{
"source": "1849300/ADM-HW3",
"score": 3
} |
#### File: ADM-HW3/preprocessing/__init__.py
```python
from nltk import RegexpTokenizer, SnowballStemmer
from nltk.corpus import stopwords
def first(t):
# It tokenizes the string
tokenizer = RegexpTokenizer(r'\w+')
token=tokenizer.tokenize(t)
words = []
for p in range(len(token)):
# It skips English stopwords and site-name tokens
if token[p] not in stopwords.words('english') and token[p] not in ["MyAnimeList","net"]:
words.append(token[p])
# It stems the words
snowball = SnowballStemmer(language='english')
res=[]
for p in words:
t = snowball.stem(p)
res.append(t)
return res
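# Added example note: first("the ninjas are running") tokenizes the string,
# drops the English stopwords ("the", "are") and stems the rest, returning
# something like ['ninja', 'run'].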
def write_pre(text,name):
# It writes a dataframe (preprocessed text)
id_doc=int(name.split("_")[1].strip(".tsv"))
with open ("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"+str(id_doc)+".tsv","w") as f:
text.to_csv(f,sep="\t")
```
#### File: ADM-HW3/Search_engine/__init__.py
```python
import os
import pickle
import pandas as pd
from Search_engine.Vocabulary import Vocabulary
from Search_engine.inverted import inverted_index
from preprocessing import first,write_pre
pd.set_option('display.max_colwidth', None)
from nltk import RegexpTokenizer
def vocabulary(field):
'''
It creates the vocabulary that links each word to a number.
If field == True then it maps all the words inside the animeDescription section else it maps all the words from the document
'''
voc=Vocabulary()
tokenizer = RegexpTokenizer(r'\w+')
for file in os.listdir("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"):
# Reads the preprocessed file
dataframe=pd.read_csv("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"+file,sep="\t", index_col = [0])
if field:
reviews=dataframe["animeDescription"].to_string(index=False)
token=tokenizer.tokenize(reviews)
# It maps each tokenized word in animeDescription in a number
for word in token:
voc.add(word)
else:
# It maps each tokenized word in each section of the document in a number
for index in dataframe:
data_field=dataframe[index].to_string(index=False)
if index in ["animeTitle","animeType","animeDescription","animeRelated","animeCharacters","animeVoices","animeStaff"]:
token=tokenizer.tokenize(data_field)
for word in token:
voc.add(word)
else:
voc.add(data_field)
# It writes the vocabulary on the disk
voc.write()
return voc
# It preprocesses each document and save it
def preprocessing_files():
columns=["animeUrl","animeNumEpisode","releaseDate","endDate","animeNumMembers",
"animeScore","animeUsers","animeRank","animePopularity"]
for file in os.listdir("/content/drive/My Drive/ADM-HW3/HW3/tsvFiles/"):
id_file=file.split("_")[1].strip(".tsv")
dataframe=pd.read_csv("/content/drive/My Drive/ADM-HW3/HW3/tsvFiles/"+file,sep="\t")
for index in dataframe:
# If the column associated with the considered string is in the columns list we don't process it
if index not in columns:
stringOb=dataframe[index].to_string(index=False)
dataframe[index] = " ".join(first(stringOb))
with open("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"+id_file+".tsv","w") as f:
dataframe.to_csv(f,sep="\t")
def inverted(Voc,field):
# It creates the inverted index
voc=inverted_index()
tokenizer = RegexpTokenizer(r'\w+')
for file in os.listdir("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"):
id_file=file.strip(".tsv")
dataframe=pd.read_csv("/content/drive/My Drive/ADM-HW3/HW3/preprocessed_files/"+file,sep="\t", index_col = [0])
# If field == True creates the inverted index using only the words in animeDescription
if field:
reviews=dataframe["animeDescription"].to_string(index=False)
token=tokenizer.tokenize(reviews)
for word in token:
word=Voc.myget(word)
voc.add(word,id_file)
else:
# If field == False creates the inverted index using all the words in the document
for index in dataframe:
data_field=dataframe[index].to_string(index=False)
if index in ["animeTitle","animeType","animeDescription","animeRelated","animeCharacters","animeVoices","animeStaff"]:
token=tokenizer.tokenize(data_field)
for word in token:
word=Voc.myget(word)
voc.add(word,id_file)
else:
word=Voc.myget(data_field)
voc.add(word,id_file)
voc.write()
return voc
def And_query(query,check,field):
''' It computes the and query '''
# It checks if vocabulary is in the dir
if "vocabulary" in os.listdir("/content/drive/My Drive/ADM-HW3/HW3/"):
voc=readVoc()
else:
voc=vocabulary(field)
# It checks if inverted_index is in the dir
if "inverted_index" in os.listdir("/content/drive/My Drive/ADM-HW3/HW3/"):
vocI=readInv()
else:
vocI=inverted(voc,field)
# It preprocesses the query
query=first(query)
f_set=set()
# It intersects the sets of documents that contain each word
for word in query:
word=voc.myget(word)
if len(f_set)==0:
f_set.update(vocI.get_res(word))
else:
f_set=f_set.intersection(vocI.get_res(word))
# It sorts the matching documents in ascending numeric order
f_list=sorted(map(int,list(f_set)))
# If check == True it prints the result
if check:
read_info(f_list)
else:
return f_list
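# Added usage note (sketch): And_query("sword fight", check=True, field=True)
# preprocesses the query, intersects the posting lists of its stemmed terms and
# prints title/description/url for every matching document; it assumes the
# vocabulary and inverted index pickles already exist under the Drive paths above.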
def read_info(f_set):
# It reads all the important information from the files
for file in f_set:
with open("/content/drive/My Drive/ADM-HW3/HW3/tsvFiles/anime_"+str(file)+".tsv") as f:
dataframe=pd.read_csv(f,sep="\t")
title=dataframe["animeTitle"].to_string(index=False).strip(" - MyAnimeList.net")
description =dataframe["animeDescription"].to_string(index=False)
url=dataframe["animeUrl"].to_string(index=False)
# Printing results
print("title, {}\ndescription, {}\nurl, {}\n".format(title,description,url))
def readVoc():
# It reads the created vocabulary
with open("/content/drive/My Drive/ADM-HW3/HW3/vocabulary","rb") as f:
voc=pickle.load(f)
return voc
def readInv():
# It reads the created inverted index
with open("/content/drive/My Drive/ADM-HW3/HW3/inverted_index","rb") as f:
voc=pickle.load(f)
return voc
```
#### File: ADM-HW3/Search_engine/Vocabulary.py
```python
import pickle
class Vocabulary:
# It contains all the words mapped to numbers
def __init__(self):
# Creation on the object
self.name="vocabulary"
self.dict={}
self.number=0
def add(self, word):
# Adds element to the object self
if word not in self.dict:
self.dict[word]=self.number
self.number+=1
def myget(self,word):
# It returns the number associated with the given word
return self.dict[word]
def write(self):
# It writes the object self in a binary file
with open("/content/drive/My Drive/ADM-HW3/HW3/"+self.name,"wb") as f:
pickle.dump(self,f)
def print(self):
# It prints the dictionary inside the object self
print(self.dict)
def getKeys(self):
# It returns the keys of the dictionary inside self
return self.dict.keys()
def getValues(self):
# It returns the values of the dictionary inside self
return self.dict.values()
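# --- Illustrative usage (added sketch, not part of the original assignment) ---
# write() targets a Google Drive path, so only the in-memory behaviour is shown.
if __name__ == "__main__":
    v = Vocabulary()
    for w in ["naruto", "one", "piece", "naruto"]:
        v.add(w)
    v.print()                # {'naruto': 0, 'one': 1, 'piece': 2}
    print(v.myget("piece"))  # 2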
``` |
{
"source": "1849300/ADM-HW5",
"score": 4
} |
#### File: 1849300/ADM-HW5/fun_three.py
```python
import trees as t
import math
import numpy as np
import random
import networkx as nx
import matplotlib.pyplot as plt
def shortest_path(graph, s, t):
''' It computes the shortest path between nodes s and t in graph '''
if s == t:
return "There is no path, source and target nodes are the same", -1
unvisited, shortest_path, predecessor = list(), dict(), dict()
# We set the distances between the source node and all other nodes to infinity, except for the distance between source
# and itself, which we set to 0.
for node in graph.nodes():
shortest_path[node] = math.inf
unvisited.append(node)
shortest_path[s] = 0
# We loop until we visit all the nodes in the graph
while unvisited:
# We choose the node with the smallest value as the “current node”
current_node = None
for node in unvisited:
if current_node == None:
current_node = node
elif shortest_path[node] < shortest_path[current_node]:
current_node = node
# Visit all the neighbors of current_node. As we visit each neighbor, we update its tentative distance
# from the starting node
for neighbor in graph.neighbors(current_node):
value = shortest_path[current_node] + \
graph[current_node][neighbor]['weight']
if value < shortest_path[neighbor]:
shortest_path[neighbor] = value
predecessor[neighbor] = current_node
unvisited.remove(current_node)
# Now we have to return the path using predecessor dictionary
if t not in predecessor:
return "Not possible, there is no path between target and source", -1
last = t
path = list([last])
while last != s:
path.append(predecessor[last])
last = predecessor[last]
return path
def selectRandom(nodes):
''' Select at random one node '''
return random.choice(list(nodes))
def selectRandomSeq(nodes):
''' Select at random a certain number of nodes (in this case 5) '''
return np.random.choice(nodes, 5)
def fun3(start, stop, p, p1, pn):
''' It computes the shortest ordered path '''
graph = t.build('a', start, stop)
# Create the path between the starting node and the first of the sequence
path0 = list(shortest_path(graph, p1, p[0])[::-1][:-1])
# Create a path between a node and his following in the sequence of nodes
path = []
for i in range(len(p)-1):
percorso = list(shortest_path(graph, p[i], p[i+1]))
if percorso[1] == -1:
return "Not possible"
percorso = percorso[::-1]
path.extend(percorso[:-1])
# Create the path between the last node of the sequence and the last node of the path (pn)
pathn = list(shortest_path(graph, p[-1], pn)[::-1])
path.extend(pathn)
path0.extend(path)
return path0
def vis3(start, stop, p, p1, pn):
lista = fun3(start, stop, p, p1, pn)
if lista == 'Not possible':
return lista
edges = []
for i in range(len(lista)-1):
edge = (lista[i], lista[i+1])
edges.append(edge)
print(edges)
grafo = nx.DiGraph(edges)
color = []
for node in grafo:
if node == p1 or node == pn:
color.append('green')
elif node in p:
color.append('yellow')
else:
color.append('blue')
plt.clf()
nx.draw_networkx(grafo, node_color=color)
plt.show()
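# --- Illustrative usage (added sketch, not part of the original homework) ---
# A tiny self-contained check of shortest_path on a hand-built weighted graph;
# the node labels and weights are made up for demonstration only.
if __name__ == "__main__":
    demo = nx.Graph()
    demo.add_edge('a', 'b', weight=1)
    demo.add_edge('b', 'c', weight=2)
    demo.add_edge('a', 'c', weight=10)
    # the path is returned from target back to source
    print(shortest_path(demo, 'a', 'c'))  # -> ['c', 'b', 'a']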
``` |
{
"source": "1850061/dash_plotly_practise",
"score": 3
} |
#### File: dash_plotly_practise/src/app.py
```python
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
import numpy as np
from DataProcess import *
app = dash.Dash()
df_school_type = pd.read_csv(
'../lab3_datasets/college-salaries/salaries-by-college-type.csv')
df_school_region = pd.read_csv(
'../lab3_datasets/college-salaries/salaries-by-region.csv')
df_school_type = data_handle(df_school_type)
app.layout = html.Div([
html.Div([
html.Div([
dcc.Dropdown(
id='school_type_or_region',
options=[{'label': i, 'value': i} for i in ['School Type', 'Region']],
value='School Type'
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
dcc.Dropdown(
id='salary_type',
options=[{'label': i, 'value': i} for i in get_salary_type()],
value='Mid-Career Median Salary'
)
], style={'width': '49%', 'float': 'right', 'display': 'inline-block'})
], style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': 'rgb(250, 250, 250)',
'padding': '10px 5px'
}),
html.Div([
dcc.Graph(
# Bar chart on the left
id='start_mid_salary_compare',
)
], style={'width': '49%', 'display': 'inline-block', 'padding': '0 20'}),
html.Div([
# Box plot on the right
dcc.Graph(id='salary_box')
], style={'display': 'inline-block', 'width': '49%'}),
html.Div([
# Sunburst chart at the bottom
dcc.Graph(id='sunburst_salary_box'),
], style={'display': 'inline-block', 'width': '49%'}),
html.Div([
# Graph for the selected school
dcc.Graph(id='certain_school'),
], style={'display': 'inline-block', 'width': '49%'}),
])
@app.callback(
dash.dependencies.Output('start_mid_salary_compare', 'figure'),
[dash.dependencies.Input('school_type_or_region', 'value')])
def update_start_mid_salary_compare_graph(school_type_or_region):
graph_date = get_start_mid_salary_compare_data(school_type_or_region)
trace_1 = go.Bar(
x=graph_date['x'],
y=graph_date['Starting Median Salary'],
name='Starting Median Salary'
)
trace_2 = go.Bar(
x=graph_date['x'],
y=graph_date['Mid-Career Median Salary'],
name='Mid-Career Median Salary'
)
trace = [trace_1, trace_2]
layout = go.Layout(
title=school_type_or_region + ' --- Salary Graph',
# X-axis settings
xaxis={
'title': school_type_or_region,
},
# Y-axis settings
yaxis={
'title': 'Salary',
},
margin={'l': 50, 'b': 30, 't': 50, 'r': 0},
height=450,
hovermode='closest'
)
return {
'data': trace,
'layout': layout
}
@app.callback(
dash.dependencies.Output('salary_box', 'figure'),
[dash.dependencies.Input('school_type_or_region', 'value'),
dash.dependencies.Input('salary_type', 'value')])
def update_salary_box_graph(school_type_or_region, salary_type):
graph_date = get_salary_box_data(school_type_or_region, salary_type)
trace = []
for i in range(len(graph_date['x'])):
trace.append(go.Box(y=np.array(graph_date['y'][i])[0], name=graph_date['x'][i]))
layout = go.Layout(
title=school_type_or_region + ' --- Salary Distribution',
# X-axis settings
xaxis={
'title': school_type_or_region,
},
# Y-axis settings
yaxis={
'title': 'Salary Distribution',
},
margin={'l': 50, 'b': 30, 't': 50, 'r': 0},
height=450,
hovermode='closest'
)
return {
'data': trace,
'layout': layout
}
@app.callback(
dash.dependencies.Output('sunburst_salary_box', 'figure'),
[dash.dependencies.Input('school_type_or_region', 'value')])
def update_sunburst_salary_box_graph(school_type_or_region):
graph_date = get_data(school_type_or_region)
fig = px.sunburst(
graph_date,
path=[school_type_or_region, 'School Name'],
values='Mid-Career Median Salary',
branchvalues='total',
color='Mid-Career Median Salary',
color_continuous_scale='RdBu',
title='sunburst graph',
hover_data=['School Name']
)
fig.layout.height = 600
fig.layout.margin = {'l': 50, 'b': 30, 't': 50, 'r': 0}
fig.layout.title = 'sunburst graph'
fig.layout.hovermode = 'closest'
return {
'data': fig.data,
'layout': fig.layout
}
@app.callback(
dash.dependencies.Output('certain_school', 'figure'),
[dash.dependencies.Input('sunburst_salary_box', 'hoverData'),
dash.dependencies.Input('school_type_or_region', 'value')])
def update_certain_school_graph(hoverData, school_type_or_region):
school_name = hoverData['points'][0]['label'].strip()
graph = get_data(school_type_or_region)
yArray = []
if school_name not in get_school_regions() and school_name not in get_school_types():
dff = graph[graph['School Name'] == school_name]
yArray = np.array(dff)[0][3:]
elif school_name in get_school_types():
yArray = get_media_salary_by_certain_school_type(graph, school_name)
elif school_name in get_school_regions():
yArray = get_media_salary_by_certain_school_region(graph, school_name)
adjust_array_data(yArray)
return {
'data': [go.Scatter(
x=['10', '25', '50', '75', '90'],
y=yArray,
mode='lines+markers',
line=dict(
color='rgb(255, 182, 193)',
width=1
),
marker={
'size': 15,
'opacity': 0.5,
'line': {'width': 0.5, 'color': 'white'}
}
)],
'layout': go.Layout(
title='Salary Of Certain School, School Type Or Region',
# X-axis settings
xaxis={
'title': 'Mid-Career (x)th Percentile Salary',
'tickmode': 'auto', 'nticks': 10, 'tickwidth': 0.1,
},
# Y-axis settings
yaxis={
'title': school_name,
},
margin={'l': 60, 'b': 80, 't': 50, 'r': 0},
height=550,
width=700,
hovermode='closest'
)
}
app.css.append_css({
'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css'
})
if __name__ == '__main__':
dataInit()
app.run_server(port=8090)
``` |
{
"source": "1850061/file_manage",
"score": 2
} |
#### File: 1850061/file_manage/fileFunction.py
```python
from FCB import FCB
from FAT import FAT
from utils import *
import math
from queue import Queue
def dirAddFile(parentDir, filename, start):
contentIndex = getContentIndex(parentDir, FAT.nowFat)
fileContent = readFile()
alreadyLen = len(contentIndex)
writeStr = ""
for i in range(alreadyLen):
writeStr = writeStr + fileContent[contentIndex[i]]
writeStr = writeStr.replace('\n', '')
writeLi = writeStr.split('|')
writeList = []
for i in range(len(writeLi) - 1):
writeList.append(writeLi[i])
writeList.append(filename + ' ' + str(start))
writeStr = '|'.join(writeList)
writeStr = writeStr + '|'
nowMemLen = math.floor(len(writeStr) / 16 + 1)
newMenLen = nowMemLen - alreadyLen
freeBlock = FAT.findFreeBlock(newMenLen)
if len(freeBlock) != newMenLen:
return False
allBlock = contentIndex + freeBlock
changeFat(allBlock)
writeContent(writeStr, allBlock)
return True
def createDir(name, parentDir):
freeBlock = FAT.findFreeBlock(3)
changeFat(freeBlock)
if len(freeBlock) != 3:
return "磁盘空间不足,创建失败"
start = freeBlock[0]
isCreatSuccess = True
if parentDir != -1:
isCreatSuccess = dirAddFile(parentDir, name, start)
if not isCreatSuccess:
return "磁盘空间不足,创建失败"
fcbDir = FCB(name, freeBlock[0], 0)
fcbDir.writeFCB()
writeIndex = FAT.getBlockIndex(start, 3)
fileContent = readFile()
writeStr = toNumDigit(parentDir, 3) + '|' + fileContent[writeIndex][4:]
fileContent[writeIndex] = writeStr
with open('fileManage.txt', 'w+') as f:
f.writelines(fileContent)
return "创建成功"
def deleteSingleDir(path, start):
pass
def createTxt(name, start):
str = createDir(name, start)
return str
def changeFat(freeBlock):
for i in range(len(freeBlock) - 1):
FAT.nowFat[freeBlock[i]] = freeBlock[i + 1]
FAT.nowFat[freeBlock[len(freeBlock) - 1]] = -1
def showDir(start):
dirName = findDirFile(start, FAT.nowFat)['name']
list = []
for i in range(len(dirName)):
list.append(dirName[i])
return list
def openDirUnderNow(start, name):
if name.endswith('.txt'):
return '请输入目录名,而非文件名'
fileNames = findDirFile(start, FAT.nowFat)['name']
fileBlock = findDirFile(start, FAT.nowFat)['block']
for i in range(len(fileNames)):
if fileNames[i] == name:
return fileBlock[i]
return '没找到该目录'
def openFileUnderNow(start, name):
fileNames = findDirFile(start, FAT.nowFat)['name']
fileBlock = findDirFile(start, FAT.nowFat)['block']
for i in range(len(fileNames)):
if fileNames[i] == name:
return fileBlock[i]
return '没找到该文件'
def openDir(start, path):
list = path.split('/')
now = start
if list[0] == 'root:':
now = 52
list = list[1:]
for i in range(len(list)):
if list[i] == '.':
continue
elif list[i] == '..':
par = getParDirBlock(now, FAT.nowFat)
if par == -1:
return '您已经在根目录,无法回退'
now = par
elif list[i] == '':
continue
else:
res = openDirUnderNow(now, list[i])
if len(str(res)) > 3:
return res
now = res
return now
def rename(origin, new, nowBlock):
contentIndex = getContentIndex(nowBlock, FAT.nowFat)
alreadyLen = len(contentIndex)
writeStr = findDirStr(nowBlock, FAT.nowFat)
writeStr = writeStr.replace(origin, new)
nowMemLen = math.floor(len(writeStr) / 16 + 1)
newMenLen = nowMemLen - alreadyLen
if newMenLen >= 0:
freeBlock = FAT.findFreeBlock(newMenLen)
if len(freeBlock) != newMenLen:
return '重命名失败,(磁盘空间不足)'
allBlock = contentIndex + freeBlock
changeFat(allBlock)
writeContent(writeStr, allBlock)
else:
dec = alreadyLen - nowMemLen
clearFat(contentIndex[-dec - 1:])
contentIndex = contentIndex[0: -dec]
writeContent(writeStr, contentIndex)
fileIndex = int(openFileUnderNow(nowBlock, new))
fcbInfo = getFCBInfo(fileIndex, FAT.nowFat)
fcb = FCB(new, fcbInfo['start'], fcbInfo['size'])
fcb.writeFCB()
return '重命名成功'
def eraseFat(list):
fileContent = readFile()
for i in range(len(list)):
FAT.nowFat[list[i]] = -2
fileContent[list[i]] = '0' * 16 + '\n'
with open('fileManage.txt', 'w') as f:
f.writelines(fileContent)
def clearFat(list):
fileContent = readFile()
for i in range(len(list)):
if i == 0:
FAT.nowFat[list[i]] = -1
continue
else:
FAT.nowFat[list[i]] = -2
fileContent[list[i]] = '0' * 16 + '\n'
with open('fileManage.txt', 'w') as f:
f.writelines(fileContent)
def readTxt(nowBlock, name):
fileIndex = openFileUnderNow(nowBlock, name)
if fileIndex == '没找到该文件':
return fileIndex
fileIndex = int(fileIndex)
content = findDirStr(fileIndex, FAT.nowFat)[4:-1]
return content
def closeFile(nowBlock):
return
def writeTxt(nowBlock, name, content):
fileIndex = openFileUnderNow(nowBlock, name)
if fileIndex == '没找到该文件':
return fileIndex
content = content + '|'
fileIndex = int(fileIndex)
contentIndex = getContentIndex(fileIndex, FAT.nowFat)
alreadyLen = len(contentIndex)
head = findDirStr(nowBlock, FAT.nowFat)[0:4]
writeStr = findDirStr(nowBlock, FAT.nowFat)[4:-1]
writeStr = head + writeStr.replace(writeStr, content)
nowMemLen = math.floor(len(writeStr) / 16 + 1)
newMenLen = nowMemLen - alreadyLen
if newMenLen >= 0:
freeBlock = FAT.findFreeBlock(newMenLen)
if len(freeBlock) != newMenLen:
return '重命名失败,(磁盘空间不足)'
allBlock = contentIndex + freeBlock
changeFat(allBlock)
writeContent(writeStr, allBlock)
else:
dec = alreadyLen - nowMemLen
clearFat(contentIndex[-dec - 1:])
contentIndex = contentIndex[0: -dec]
writeContent(writeStr, contentIndex)
fcbInfo = getFCBInfo(fileIndex, FAT.nowFat)
fcb = FCB(name, fcbInfo['start'], len(content))
fcb.writeFCB()
return '写入成功'
def seeTxt(nowBlock, name):
fileIndex = openFileUnderNow(nowBlock, name)
if fileIndex == '没找到该文件':
return fileIndex
fileIndex = int(fileIndex)
fcbInfo = getFCBInfo(fileIndex, FAT.nowFat)
return fcbInfo
def dirDeleteFile(parentDir, filename):
contentIndex = getContentIndex(parentDir, FAT.nowFat)
fileContent = readFile()
alreadyLen = len(contentIndex)
writeStr = ""
for i in range(alreadyLen):
writeStr = writeStr + fileContent[contentIndex[i]]
writeStr = writeStr.replace('\n', '')
writeLi = writeStr.split('|')
writeList = []
for i in range(len(writeLi) - 1):
name = writeLi[i].split(' ')[0]
if name == filename:
continue
writeList.append(writeLi[i])
writeStr = '|'.join(writeList)
writeStr = writeStr + '|'
nowMemLen = math.floor(len(writeStr) / 16 + 1)
newMenLen = nowMemLen - alreadyLen
if newMenLen >= 0:
freeBlock = FAT.findFreeBlock(newMenLen)
if len(freeBlock) != newMenLen:
return '重命名失败,(磁盘空间不足)'
allBlock = contentIndex + freeBlock
changeFat(allBlock)
writeContent(writeStr, allBlock)
else:
dec = alreadyLen - nowMemLen
clearFat(contentIndex[-dec - 1:])
contentIndex = contentIndex[0: -dec]
writeContent(writeStr, contentIndex)
return True
def deleteSingleFile(nowBlock, name):
fileIndex = openFileUnderNow(nowBlock, name)
fileIndex = int(fileIndex)
list = getFileIndex(fileIndex, FAT.nowFat)
eraseFat(list)
def deleteFileByBlock(fileIndex):
list = getFileIndex(fileIndex, FAT.nowFat)
eraseFat(list)
def deleteFile(nowBlock, name):
fileIndex = openFileUnderNow(nowBlock, name)
if fileIndex == '没找到该文件':
return fileIndex
fileIndex = int(fileIndex)
deleteList = findDirFile(fileIndex, FAT.nowFat)['block']
deleteQueue = Queue(maxsize=0)
for i in range(len(deleteList)):
if deleteList[i] == '':
continue
deleteQueue.put(int(deleteList[i]))
while not deleteQueue.empty():
now = deleteQueue.get()
if getFileType(now, FAT.nowFat) == '目录':
deleteList = findDirFile(now, FAT.nowFat)['block']
for i in range(len(deleteList)):
if deleteList[i] == '':
continue
deleteQueue.put(int(deleteList[i]))
deleteFileByBlock(now)
else:
deleteFileByBlock(now)
deleteSingleFile(nowBlock, name)
dirDeleteFile(nowBlock, name)
return '删除成功'
```
#### File: 1850061/file_manage/utils.py
```python
def readFile():
fileContent = []
with open('fileManage.txt', 'r') as f:
for line in f.readlines():
fileContent.append(line)
return fileContent
def toNumDigit(num, all):
addZero = all - len(str(num))
return '0' * addZero + str(num)
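# Added example note: toNumDigit(7, 3) -> '007' (left-pads the number with zeros
# until the string is `all` characters long).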
def eliminateZero(name):
isFirst = True
countZero = 0
for i in range(len(name)):
if name[i] == '0' and isFirst:
countZero += 1
else:
isFirst = False
if name[countZero:] != '':
return name[countZero:]
else:
return '0'
def fileContentInstead(list, index, begin, length, replaceStr):
list[index] = list[index][0:begin] + replaceStr + list[index][begin + length:]
def checkFile(name, start, size):
if 'root:' == name:
return '文件创建失败(文件命名不能与根目录重名)'
for i in range(len(name)):
if name[i] == '0' or name[i] == '|' or name[i] == '/':
return "文件创建失败(文件名中不能包含0,|以及/符号)"
for ch in name.encode('utf-8').decode('utf-8'):
if u'\u4e00' <= ch <= u'\u9fff':
return "文件创建失败(文件名中不能包含中文)"
if len(name) > 10:
return "文件创建失败(文件名长度不能超过10)"
if int(start) >= 256:
return "文件创建失败(起始地址错误)"
if size >= 1000:
return "文件创建失败(文件过大)"
return "Success"
def containChinese(content):
for ch in content.encode('utf-8').decode('utf-8'):
if u'\u4e00' <= ch <= u'\u9fff':
return True
return False
def checkDir(name, start, fat):
if 'root:' == name:
return '目录命名失败(文件命名不能与根目录重名)'
if name.endswith('.txt'):
return '目录命名错误(不能以.txt结尾)'
if len(name) == 0:
return '目录命名错误(目录名不能为空)'
dirInfo = findDirFile(start, fat)
dirNames = dirInfo['name']
for i in range(len(dirNames)):
if name == dirNames[i]:
return '目录命名错误(目录名不允许重名)'
return 'Success'
def getFCBIndex(start, fat):
return [start, fat[start]]
def getFCBInfo(start, fat):
fileContent = readFile()
info = fileContent[start]
sec = fat[start]
time = fileContent[sec]
type = ''
if time[0] == '0':
type = '目录'
else:
type = 'Txt文件'
t = time[1:5] + '-' + time[5:7] + '-' + time[7:9] + ' ' + time[9:11] + '-' + time[11:13] + '-' + time[13:15] + ' '
return {'name': eliminateZero(info[0:10]), 'start': int(eliminateZero(info[10:13])),
'size': int(eliminateZero(info[13:16])), 'type': type, 'lastModifyTime': t}
def getFileType(startBlock, fat):
info = getFCBInfo(startBlock, fat)
return info['type']
# Get the indices of the blocks that hold the file content
def getContentIndex(start, fat):
index = []
now = fat[fat[start]]
while now != -1:
index.append(now)
now = fat[now]
return index
def getFileIndex(start, fat):
return getFCBIndex(start, fat) + getContentIndex(start, fat)
def writeContent(str, blockList):
fileContent = readFile()
for i in range(len(blockList) - 1):
fileContent[blockList[i]] = str[16 * i: 16 * i + 16] + '\n'
lastLen = len(str) % 16
fileContent[blockList[len(blockList) - 1]] = str[-lastLen:] + "0" * (16 - lastLen) + '\n'
with open('fileManage.txt', 'w') as f:
f.writelines(fileContent)
def getParDirBlock(start, fat):
fileContent = readFile()
par = fileContent[getContentIndex(start, fat)[0]][0:3]
res = 0
if par == '000':
res = 0
else:
isFirst = True
countZero = 0
for i in range(len(par)):
if par[i] == '0' and isFirst:
countZero += 1
else:
isFirst = False
res = int(par[countZero:])
return res
def findDirFile(start, fat):
contentIndex = getContentIndex(start, fat)
fileContent = readFile()
alreadyLen = len(contentIndex)
writeStr = ""
for i in range(alreadyLen):
writeStr = writeStr + fileContent[contentIndex[i]]
writeStr = writeStr.replace('\n', '')
writeLi = writeStr.split('|')
writeList = []
for i in range(len(writeLi) - 1):
if i == 0:
continue
writeList.append(writeLi[i])
name = []
block = []
for i in range(len(writeList)):
temp = writeList[i].split(' ')
name.append(temp[0])
block.append(temp[1])
return {'name': name, 'block': block}
def findDirStr(start, fat):
contentIndex = getContentIndex(start, fat)
fileContent = readFile()
alreadyLen = len(contentIndex)
writeStr = ""
for i in range(alreadyLen):
writeStr = writeStr + fileContent[contentIndex[i]]
writeStr = writeStr.replace('\n', '')
writeLi = writeStr.split('|')
writeList = []
for i in range(len(writeLi) - 1):
writeList.append(writeLi[i])
writeStr = '|'.join(writeList)
writeStr = writeStr + '|'
return writeStr
def canRename(origin, new, start, fat):
res = checkFile(new, start, 0)
res = res.replace('创建', '重命名')
if res == 'Success':
pass
else:
return res
if 'root:' == new:
return '重命名失败(文件命名不能与根目录重名)'
isDirOri = True
isDirNew = True
if origin.endswith('.txt'):
isDirOri = False
if new.endswith('.txt'):
isDirNew = False
if isDirOri != isDirNew:
return '重命名失败(重命名不能改变文件类型)'
names = findDirFile(start, fat)['name']
isAppear = False
for i in range(len(names)):
if new == names[i]:
return '重命名失败(以该文件名命名得文件已存在)'
if origin == names[i]:
isAppear = True
if isAppear:
return 'Success'
return '重命名失败(该文件不存在)'
def checkPath(path):
for i in range(len(path)):
if path[i] == '\\':
return 'path中不应该出现\\符号'
return 'Success'
def combinePath(nowPath, addPath):
res = []
now = nowPath.split('/')
add = addPath.split('/')
if add[0] == 'root:':
if addPath.endswith('/'):
return addPath
return addPath + '/'
for i in range(len(add)):
if add[len(add) - i - 1] == '.':
del add[len(add) - i - 1]
back = 1
for i in range(len(add)):
if add[i] == '..':
back = back + 1
res = res + now[0: -back]
res = res + add[back - 1:]
path = '/'.join(res)
if path.endswith('/'):
return path
return path + '/'
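# Added example note: combinePath('root:/docs/', '../pics') -> 'root:/pics/';
# a leading '..' steps one directory up before the remaining components are appended.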
``` |
{
"source": "1850061/image_retrieval",
"score": 3
} |
#### File: image_retrieval/server/rest-server.py
```python
from flask import Flask, jsonify, abort, request, make_response, url_for, redirect, render_template
from flask_httpauth import HTTPBasicAuth
from werkzeug.utils import secure_filename
import os
import star
import shutil
import numpy as np
from search import recommend
from getTypes import get_types
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
from tensorflow.python.platform import gfile
app = Flask(__name__, static_url_path="")
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.jinja_env.variable_start_string = '{['
app.jinja_env.variable_end_string = ']}'
auth = HTTPBasicAuth()
# ==============================================================================================================================
#
# Loading the extracted feature vectors for image retrieval
#
#
# ==============================================================================================================================
extracted_features = np.zeros((10000, 2048), dtype=np.float32)
with open('saved_features_recom.txt') as f:
for i, line in enumerate(f):
extracted_features[i, :] = line.split()
print("loaded extracted_features")
# ==============================================================================================================================
#
# This function is used to do the image search/image retrieval
#
# ==============================================================================================================================
@app.route('/imgUpload', methods=['GET', 'POST'])
# def allowed_file(filename):
# return '.' in filename and \
# filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def upload_img():
print("image upload")
result = 'static/result'
if not gfile.Exists(result):
os.mkdir(result)
shutil.rmtree(result)
if request.method == 'POST' or request.method == 'GET':
print(request.method)
# check if the post request has the file part
if 'file' not in request.files:
print('No file part')
return redirect(request.url)
file = request.files['file']
print(file.filename)
# if user does not select file, browser also
# submits an empty part without filename
if file.filename == '':
print('No selected file')
return redirect(request.url)
if file: # and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
inputloc = os.path.join(app.config['UPLOAD_FOLDER'], filename)
print(inputloc)
recommend(inputloc, extracted_features)
os.remove(inputloc)
image_path = "/result"
image_list = [os.path.join(image_path, file) for file in os.listdir(result)
if not file.startswith('.')]
images = {
'image0': image_list[0],
'image1': image_list[1],
'image2': image_list[2],
'image3': image_list[3],
'image4': image_list[4],
'image5': image_list[5],
'image6': image_list[6],
'image7': image_list[7],
'image8': image_list[8]
}
print(images)
return jsonify(images)
@app.route('/getTypes', methods=['GET', 'POST'])
def get_type():
print("get type")
imageList = request.values.get('imageList')
print(imageList)
if request.method == 'POST' or request.method == 'GET':
print(request.method)
typeList = get_types(imageList)
return jsonify({'typeList': typeList})
@app.route('/getStar', methods=['GET', 'POST'])
def get_star():
print("get star")
imageList = star.get_star_list()
return jsonify({'imageList': imageList})
@app.route('/delStar', methods=['GET', 'POST'])
def del_star():
print("del star")
imageDel = request.values.get('imageDel')
print(imageDel)
star.del_star(imageDel)
imageList = star.get_star_list()
return jsonify({'imageList': imageList})
@app.route('/addStar', methods=['GET', 'POST'])
def add_star():
print("add star")
imageStar = request.values.get('imageStar')
star.add_star(imageStar)
imageList = star.get_star_list()
return jsonify({'imageList': imageList})
# ==============================================================================================================================
#
# Main function #
#
# ==============================================================================================================================
@app.route("/")
def main():
return render_template("main.html")
@app.route("/collect")
def collect():
return render_template("collection.html")
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
```
#### File: image_retrieval/server/star.py
```python
import os
import matplotlib.image as mpimg
from tensorflow.python.platform import gfile
basePath = os.getcwd()
def getTagName(filename):
fir = filename.split('\\')[-1]
end = fir.split('/')[-1]
return end
def add_star(imageStar):
baseFile = 'static\\result'
for filepath, dirnames, filenames in os.walk(baseFile):
for filename in filenames:
if getTagName(filename) == getTagName(imageStar):
starBase = 'static/star'
if not gfile.Exists(starBase):
os.mkdir(starBase)
readFile = basePath + '\\' + os.path.join(filepath, filename)
writeFile = basePath + '\\' + os.path.join(starBase, filename)
print(writeFile)
image = mpimg.imread(readFile)
mpimg.imsave(writeFile, image)
return True
def del_star(imageDel):
baseFile = 'static\\star'
for filepath, dirnames, filenames in os.walk(baseFile):
for filename in filenames:
if getTagName(filename) == getTagName(imageDel):
starBase = 'static/star'
if not gfile.Exists(starBase):
os.mkdir(starBase)
delFile = basePath + '\\' + os.path.join(filepath, filename)
print(delFile)
os.remove(delFile)
return True
def get_star_list():
imageList = []
baseFile = 'static\\star'
for filepath, dirnames, filenames in os.walk(baseFile):
for filename in filenames:
imageList.append('star\\' + filename)
return imageList
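# Minimal usage sketch (assumption: run from the server directory so the relative
# 'static' paths resolve). get_star_list only walks 'static\\star', so it is safe
# to call even before any image has been starred.
if __name__ == '__main__':
    print(get_star_list())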
``` |
{
"source": "1850061/Speech-Recognition",
"score": 2
} |
#### File: 1850061/Speech-Recognition/asrInterface.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QMovie
from PyQt5.QtWidgets import QAction, QPushButton
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(314, 462)
MainWindow.setStyleSheet("background-color: black;")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
# Geometry: position (x, y) first, then width and height
self.label_3.setGeometry(QtCore.QRect(60, 240, 201, 56))
font = self.setFont("Calibri", 14)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(0, 117, 210);")
self.label_3.setWordWrap(True)
self.label_3.setObjectName("label_3")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(60, 210, 201, 21))
font = self.setFont("Calibri", 14)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(0, 117, 210);")
self.label_2.setWordWrap(True)
self.label_2.setObjectName("label_2")
self.voiceFig = QtWidgets.QLabel(self.centralwidget)
self.voiceFig.setGeometry(QtCore.QRect(70, 10, 161, 121))
self.voiceFig.setText("")
self.gif = QMovie("icon/voice.gif")
self.voiceFig.setMovie(self.gif)
self.gif.start()
self.voiceFig.setScaledContents(True)
self.voiceFig.setObjectName("voiceFig")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(60, 120, 214, 80))
font = self.setFont("Calibri", 14)
font.setStyleStrategy(QtGui.QFont.PreferAntialias)
self.label.setFont(font)
self.label.setStyleSheet("color: rgb(0, 117, 210);")
self.label.setTextFormat(QtCore.Qt.AutoText)
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(60, 310, 201, 80))
font = self.setFont("Calibri", 14)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color: rgb(0, 117, 210);")
self.label_4.setWordWrap(True)
self.label_4.setObjectName("label_4")
self.label_sayWhat = QtWidgets.QLabel(self.centralwidget)
self.label_sayWhat.setGeometry(QtCore.QRect(60, 400, 201, 26))
font = self.setFont("Calibri", 14)
self.label_sayWhat.setFont(font)
self.label_sayWhat.setStyleSheet("color: green;")
self.label_sayWhat.setWordWrap(True)
self.label_sayWhat.setObjectName("label_sayWhat")
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setStyleSheet("color: green;")
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "语音助手"))
self.label_3.setText(_translate("MainWindow", "1. 说\"音乐\"来听音乐"))
self.label_2.setText(_translate("MainWindow", "你可以:"))
self.label.setText(_translate("MainWindow", "您好?有什么可以帮到您的吗?"))
self.label_4.setText(_translate("MainWindow", "2. 说 \"笔记本\"来打开笔记本"))
self.label_sayWhat.setText(_translate("MainWindow", "您没说话"))
# Helper that builds a QFont with the given typeface and point size
def setFont(self, typeface, fontSize) -> QtGui.QFont:
font = QtGui.QFont()
font.setFamily(typeface)
font.setPointSize(fontSize)
return font
def updateSayWhat(self, say):
self.updateBar("")
_translate = QtCore.QCoreApplication.translate
self.label_sayWhat.setText(_translate("MainWindow", say))
def updateBar(self, say):
self.statusbar.showMessage(say)
``` |
{
"source": "18520339/facebook-crawling",
"score": 3
} |
#### File: facebook-crawling/2 - Automation tools with IP hiding techniques/page.py
```python
from browser import *
import time
POSTS_SELECTOR = '[class="_427x"] .userContentWrapper'
COMMENTABLE_SELECTOR = f'{POSTS_SELECTOR} .commentable_item'
FILTER_CMTS = type('Enum', (), {
'MOST_RELEVANT': 'RANKED_THREADED',
'NEWEST': 'RECENT_ACTIVITY',
'ALL_COMMENTS': 'RANKED_UNFILTERED'
})
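# FILTER_CMTS is a tiny ad-hoc enum, e.g. FILTER_CMTS.NEWEST == 'RECENT_ACTIVITY'.
# Its values mirror the data-ordering attributes that filter_comments targets below
# through the [data-ordering="..."] CSS selectors.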
def timer(func):
def wrapper(*args, **kwargs):
start = time.time()
func(*args, **kwargs)
end = time.time()
print('=> Loading time:', end - start)
return wrapper
def click_popup(selector):
btn = find_all(S(selector))
if btn != []: click(btn[0])
def failed_to_load(driver, page_url):
if driver.current_url not in page_url:
print('Redirect detected => Rerun\n')
return True
elif find_all(S('#main-frame-error')) != []:
print('Cannot load page => Rerun\n')
return True
return False
@timer
def load_more_posts(driver):
driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
while find_all(S('.async_saving [role="progressbar"]')) != []: pass
time.sleep(random.randint(3, 7))
@timer
def click_multiple_buttons(driver, selector):
for button in driver.find_elements_by_css_selector(selector):
driver.execute_script('arguments[0].click()', button)
while find_all(S(f'{COMMENTABLE_SELECTOR} [role="progressbar"]')) != []: pass
time.sleep(random.randint(3, 7))
def filter_comments(driver, by):
if by == FILTER_CMTS.MOST_RELEVANT: return
click_multiple_buttons(driver, '[data-ordering="RANKED_THREADED"]')
click_multiple_buttons(driver, f'[data-ordering="{by}"]')
def load(driver, page_url, scroll_down=0, filter_cmts_by=FILTER_CMTS.MOST_RELEVANT, view_more_cmts=0, view_more_replies=0):
print('Click Accept Cookies button')
click_popup('[title="Accept All"]')
for i in range(min(scroll_down, 3)):
print(f'Load more posts times {i + 1}/{scroll_down}')
load_more_posts(driver)
if failed_to_load(driver, page_url): return False
print('Click Not Now button')
click_popup('#expanding_cta_close_button')
for i in range(scroll_down - 3):
print(f'Load more posts times {i + 4}/{scroll_down}')
load_more_posts(driver)
if failed_to_load(driver, page_url): return False
print('Filter comments by', filter_cmts_by)
filter_comments(driver, filter_cmts_by)
for i in range(view_more_cmts):
print(f'Click View more comments buttons times {i + 1}/{view_more_cmts}')
click_multiple_buttons(driver, f'{COMMENTABLE_SELECTOR} ._7a94 ._4sxc')
if failed_to_load(driver, page_url): return False
for i in range(view_more_replies):
print(f'Click Replies buttons times {i + 1}/{view_more_replies}')
click_multiple_buttons(driver, f'{COMMENTABLE_SELECTOR} ._7a9h ._4sxc')
if failed_to_load(driver, page_url): return False
print('Click See more buttons of comments')
click_multiple_buttons(driver, f'{COMMENTABLE_SELECTOR} .fss')
if failed_to_load(driver, page_url): return False
return True
``` |
{
"source": "18520339/vietnamese-nom-script",
"score": 3
} |
#### File: Data labeling/Auto annotation/bbox_handler.py
```python
from scipy.spatial import distance
from functools import reduce
import numpy as np
import operator
import math
import cv2
class BoundingBoxHandler:
# https://stackoverflow.com/questions/51074984/sorting-according-to-clockwise-point-coordinates
@staticmethod
def BlhsingOrderPoints(points):
center = tuple(map(
operator.truediv,
reduce(lambda x, y: map(operator.add, x, y), points),
[len(points)] * 2
))
return sorted(points, key=lambda point: (
-135 - math.degrees(math.atan2(*tuple(
map(operator.sub, point, center)
)))) % 360)
# https://pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv
@staticmethod
def AdrianOrderPoints(points):
# Sort the points based on their x-coordinates
xSorted = points[np.argsort(points[:, 0]), :]
# Grab the left-most and right-most points from the sorted x-coordinate points
leftMost, rightMost = xSorted[:2, :], xSorted[2:, :]
# Now, sort the left-most coordinates according to their y-coordinates
# so we can grab the top-left and bottom-left points, respectively
leftMost = leftMost[np.argsort(leftMost[:, 1]), :]
(tl, bl) = leftMost
# Use the top-left coordinate as an anchor to calculate the Euclidean distance
# between the top-left and right-most points; by the Pythagorean theorem,
# the point with the largest distance will be our bottom-right point
D = distance.cdist(tl[np.newaxis], rightMost, 'euclidean')[0]
(br, tr) = rightMost[np.argsort(D)[::-1], :]
# Return the coordinates in top-left, top-right, bottom-right, and bottom-left order
return np.array([tl, tr, br, bl], dtype='float32')
# https://pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example
@staticmethod
def RectangleTransform(points):
quadrangle = BoundingBoxHandler.AdrianOrderPoints(np.array(points))
(tl, tr, br, bl) = quadrangle
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB)) + tl[0] - 1
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB)) + tl[1] - 1
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[tl[0], tl[1]],
[maxWidth, tl[1]],
[maxWidth, maxHeight],
[tl[0], maxHeight]
], dtype='float32')
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(quadrangle, dst)
transformed = cv2.perspectiveTransform(np.array([quadrangle]), M)
return transformed[0].tolist()
# https://cristianpb.github.io/blog/image-rotation-opencv
@staticmethod
def RotateOneBox(file_name, bbox, angle):
image = cv2.imread(file_name)
height, width = image.shape[:2]
center_x, center_y = width / 2, height / 2
for idx, point in enumerate(bbox['points']):
# OpenCV calculates standard transformation matrix
M = cv2.getRotationMatrix2D((center_x, center_y), angle, 1.0)
# Grab the rotation components of the matrix
cos, sin = np.abs(M[0, 0]), np.abs(M[0, 1])
# Compute the new bounding dimensions of the image
new_width = (height * sin) + (width * cos)
new_height = (height * cos) + (width * sin)
# Adjust the rotation matrix to take into account translation
M[0, 2] += (new_width / 2) - center_x
M[1, 2] += (new_height / 2) - center_y
# Perform the actual rotation and return the image
calculated = M @ [point[0], point[1], 1]
bbox['points'][idx] = (calculated[0], calculated[1])
return bbox
# https://pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python
@staticmethod
def NonMaximumSuppression(bboxes, threshold):
if len(bboxes) == 0: return []
points_in_bboxes = np.array([bbox['points'] for bbox in bboxes])
pick_idxs = [] # Initialize the list of picked indexes
pick_bboxes = []
x1 = points_in_bboxes[:, 0, 0] # x coordinate of the top-left corner
y1 = points_in_bboxes[:, 0, 1] # y coordinate of the top-left corner
x2 = points_in_bboxes[:, 2, 0] # x coordinate of the bottom-right corner
y2 = points_in_bboxes[:, 2, 1] # y coordinate of the bottom-right corner
# Compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# Keep looping while some indexes still remain in the indexes list
while len(idxs) > 0:
# Grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick_idxs.append(i)
pick_bboxes.append(bboxes[i])
# Find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# Compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# Compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# Delete all indexes from the index list whose overlap exceeds the threshold
idxs = np.delete(idxs, np.concatenate((
[last],
np.where(overlap > threshold)[0]
)))
# Return only the bounding boxes that
# were picked using the integer data type
return pick_bboxes
@staticmethod
def WidthOverHeightFilter(bboxes, max_ratio=0.5):
new_bboxes = []
for bbox in bboxes:
(tl, tr, br, bl) = bbox['points']
w = max(0, br[0] - tl[0] + 1)
h = max(0, br[1] - tl[1] + 1)
if w / h < max_ratio: new_bboxes.append(bbox)
```
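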
#### File: Data labeling/Auto annotation/unrotated_convertor.py
```python
from argparse import ArgumentParser
from bbox_handler import BoundingBoxHandler
import json
import sys
import os
ap = ArgumentParser()
ap.add_argument('-i', '--input', required=True, help='File that contains PPOCR rotated bboxes')
ap.add_argument('-o', '--output', required=True, help='File name after combine rotated bboxes')
ap.add_argument(
'-d',
'--direction',
required = True,
choices = ['+90', '-90', 'both'],
help = 'Current right angle direction of input images'
)
ap.add_argument(
'--max_woh',
required = 'both' in sys.argv[-1],
type = float,
help = '(Required if direction == "both") Maximum ratio width over height to filter'
)
ap.add_argument(
'--overlap',
required = 'both' in sys.argv[-1],
type = float,
help = '(Required if direction == "both") Overlap threshold to suppress'
)
args = vars(ap.parse_args())
'''Example:
python unrotated_convertor.py \
-i "../../Dataset/Tale of Kieu version 1871 - Rotate/Cache.cach" \
-o "../../Dataset/Tale of Kieu version 1871/Cache.cach" \
-d "both" \
--max_woh 0.25 \
--overlap 0.5
'''
script_dir = os.path.dirname(os.path.abspath(__file__))
input_path = os.path.join(script_dir, args['input'])
output_path = os.path.join(script_dir, args['output'])
def rotate_bboxes_to_0deg(image_idx, file_path, bboxes):
angle = int(os.path.splitext(file_path)[0][-3:])
if args['direction'] == 'both':
if (image_idx % 2 == 0 and angle != 90) or \
(image_idx % 2 == 1 and angle != -90):
raise Exception('''
\nImage must have the following format:
\n- "+90" postfix in name for even index
\n- "-90" postfix in name for odd index
''')
elif int(args['direction']) != angle:
raise Exception('Image not meet current right angle direction')
for idx, bbox in enumerate(bboxes):
absolute_path = os.path.join(
os.path.dirname(input_path),
os.path.basename(file_path) # Get file name
)
bboxes[idx] = BoundingBoxHandler.RotateOneBox(absolute_path, bbox, -angle)
bboxes[idx]['points'] = BoundingBoxHandler.RectangleTransform(bboxes[idx]['points'])
print('Rotated', file_path, 'bounding boxes to 0 degrees')
return bboxes
with open(input_path, 'r', encoding='utf-8') as file:
dataset_bboxes = {}
for line in file:
file_path, bboxes = line.rstrip('\n').split('\t')
dataset_bboxes[file_path] = json.loads(bboxes)
with open(output_path, 'w', encoding='utf-8') as file:
if args['direction'] in ['+90', '-90']:
for image_idx, item in enumerate(dataset_bboxes.items()):
file_path, bboxes = item
final_path = file_path.replace(args['direction'], '').replace(' - Rotate', '')
bboxes = rotate_bboxes_to_0deg(image_idx, file_path, bboxes)
bboxes = BoundingBoxHandler.WidthOverHeightFilter(bboxes, max_ratio=args['max_woh'])
file.write(f'{final_path}\t{bboxes}\n')
elif args['direction'] == 'both':
dataset_length = len(dataset_bboxes)
if dataset_length % 2 != 0:
raise Exception('Number of images to rotate must be even')
items = list(dataset_bboxes.items())
for image_idx in range(0, dataset_length, 2):
file_path_1, bboxes_1 = items[image_idx] # for +90 degree
file_path_2, bboxes_2 = items[image_idx + 1] # for -90 degree
final_path = file_path_1.replace('+90', '').replace(' - Rotate', '')
bboxes_1 = rotate_bboxes_to_0deg(image_idx, file_path_1, bboxes_1)
bboxes_2 = rotate_bboxes_to_0deg(image_idx + 1, file_path_2, bboxes_2)
final_bboxes = BoundingBoxHandler.WidthOverHeightFilter(
bboxes_1 + bboxes_2,
max_ratio = args['max_woh']
)
final_bboxes = BoundingBoxHandler.NonMaximumSuppression(
final_bboxes,
threshold = args['overlap'],
)
print('=> Merged', 'rotated bounding boxes for', final_path)
file.write(f'{final_path}\t{final_bboxes}\n')
```
#### File: vietnamese-nom-script/Text recognition/callbacks.py
```python
import tensorflow as tf
class EarlyStoppingWithStuck(tf.keras.callbacks.Callback):
def __init__(self, patience=0, stuck_str=None):
super(EarlyStoppingWithStuck, self).__init__()
self.patience = patience
self.stuck_str = stuck_str
self.best_weights = None
def on_train_begin(self, logs=None):
self.wait = 0 # Number of epoch it has waited when loss is no longer minimum
self.stopped_epoch = 0 # The epoch the training stops at
self.best_loss = float('inf') # Initialize the best loss as infinity
self.best_epoch = 0
def on_epoch_end(self, epoch, logs=None):
loss, val_loss = logs.get('loss'), logs.get('val_loss')
is_stuck = eval(self.stuck_str) if self.stuck_str else False  # evaluate the stuck condition each epoch without overwriting the expression string
if tf.less(val_loss, self.best_loss) and not is_stuck:
self.wait = 0
self.best_loss = val_loss
self.best_epoch = epoch
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0:
print(f'Early stopping and restored the model weights from the end of '
f'epoch {self.best_epoch + 1} - val_loss: {self.best_loss}\n')
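# Minimal usage sketch (assumption: how this callback could be wired into a model.fit
# call elsewhere; the dataset and model names below are illustrative). stuck_str is
# eval-ed with `loss` and `val_loss` in scope, so it can encode a custom
# "training is stuck" condition as a string expression.
if __name__ == '__main__':
    early_stopping = EarlyStoppingWithStuck(patience=10, stuck_str='val_loss > loss * 5')
    print(early_stopping.patience, early_stopping.stuck_str)
    # model.fit(train_ds, validation_data=val_ds, epochs=100, callbacks=[early_stopping])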
```
#### File: vietnamese-nom-script/Text recognition/visualizer.py
```python
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from matplotlib.font_manager import FontProperties
def draw_predicted_text(label, pred_label, fontdict, text_x):
label = label.replace('[UNK]', '?')
label_length, pred_length = len(label), len(pred_label)
if pred_label == label:
fontdict['color'] = 'green'
plt.text(text_x, 0, '\n'.join(pred_label), fontdict=fontdict)
return
pred_start, start, end = 0, 0, 0
while start <= end < label_length:
text_y = end * label_length * 5
actual_char = '[UNK]' if label[end] == '?' else label[end]
if label[start:end + 1] in pred_label[pred_start:pred_length]:
fontdict['color'] = 'dodgerblue'
plt.text(text_x, text_y, actual_char, fontdict=fontdict)
else:
if end < pred_length and end + 1 < label_length and \
pred_label[end] == label[end + 1]:
fontdict['color'] = 'gray'
plt.text(text_x, text_y, actual_char, fontdict=fontdict)
elif end < pred_length:
fontdict['color'] = 'red'
plt.text(text_x, text_y, pred_label[end], fontdict=fontdict)
fontdict['color'] = 'black'
plt.text(text_x + 35, text_y, actual_char, fontdict=fontdict)
else:
fontdict['color'] = 'gray'
plt.text(text_x, text_y, actual_char, fontdict=fontdict)
pred_start = end
start = end + 1
end += 1
def visualize_images_labels(
img_paths,
labels, # shape == (batch_size, max_length)
pred_labels = None, # shape == (batch_size, max_length)
figsize = (15, 8),
subplot_size = (2, 8), # tuple: (rows, columns) to display
legend_loc = None, # Only for predictions,
annotate_loc = None, # Only for predictions
font_path = None,
text_x = None # Position to plot actual label
):
nrows, ncols = subplot_size
num_of_labels = len(labels)
assert len(img_paths) == num_of_labels, 'img_paths and labels must have same number of items'
assert nrows * ncols <= num_of_labels, f'nrows * ncols must be <= {num_of_labels}'
fontdict = {
'fontproperties': FontProperties(fname=font_path),
'fontsize': 18,
'color': 'black',
'verticalalignment': 'top',
'horizontalalignment': 'left'
}
plt.figure(figsize=figsize)
for i in range(min(nrows * ncols, num_of_labels)):
plt.subplot(nrows, ncols, i + 1)
image, label = plt.imread(img_paths[i]), labels[i]
plt.imshow(image)
fontdict['color'] = 'black' # Reset the color
if pred_labels: draw_predicted_text(label, pred_labels[i], fontdict, text_x)
else: plt.text(text_x, 0, '\n'.join(label), fontdict=fontdict)
plt.axis('off')
if legend_loc and annotate_loc and pred_labels:
plt.subplots_adjust(left=0, right=0.75)
plt.legend(handles=[
Patch(color='green', label='Full match'),
Patch(color='dodgerblue', label='Character match'),
Patch(color='red', label='Wrong prediction'),
Patch(color='black', label='Actual character'),
Patch(color='gray', label='Missing position'),
], loc=legend_loc)
annotate_text = [f'{idx + 1:02d}. {text}' for idx, text in enumerate(pred_labels)]
plt.annotate(
f'Model predictions:\n{chr(10).join(annotate_text)}',
fontproperties = FontProperties(fname=font_path),
xycoords = 'axes fraction',
fontsize = 14,
xy = annotate_loc,
)
def plot_training_results(history, save_name, figsize=(16, 14), subplot_size=(2, 2)):
nrows, ncols = subplot_size
if 'lr' in history.keys(): del history['lr']
assert nrows * ncols <= len(history), f'nrows * ncols must be <= {len(history)}'
fig = plt.figure(figsize=figsize)
for idx, name in enumerate(history):
if 'val' in name: continue
plt.subplot(nrows, ncols, idx + 1)
plt.plot(history[name], linestyle='solid', marker='o', color='crimson', label='Train')
plt.plot(history[f'val_{name}'], linestyle='solid', marker='o', color='dodgerblue', label='Validation')
plt.xlabel('Epochs', fontsize=14)
plt.ylabel(name, fontsize=14)
title = name.replace('acc', 'accuracy')\
.replace('seq_', 'sequence_')\
.replace('char_', 'character_')\
.replace('lev_', 'levenshtein_')\
.replace('edit_', 'levenshtein_')\
.replace('_', ' ').capitalize()
plt.title(title, fontsize=18)
plt.legend(loc='best')
fig.savefig(save_name, bbox_inches='tight')
plt.show()
``` |
{
"source": "18566208560/data_exchange",
"score": 2
} |
#### File: app/resources/auth.py
```python
import uuid, time
from flask import g, request, current_app
from werkzeug.datastructures import FileStorage
from app.models.user import UserModel
from app.resources import BaseResource, ApiResource
from app.services.response import res_json
from app.repositories.user import UserRepository
from app.utils.hash import gen_md5
from app.utils.helpers import get_client_ip, not_empty_string, genrate_jwt_token
from app.utils.files import remove_file_by_link, save_uploaded_image
from app.services.decorators import api_auth
class AuthLoginResource(BaseResource):
"""
User login
"""
def post(self):
self.parser.add_argument('username', type=not_empty_string, location="json", required=True)
self.parser.add_argument('password', type=not_empty_string, location="json", required=True)
args = self.parse_args()
username = args.get('username')
password = gen_md5(args.get('password'))
user = UserModel.get_model_by_fields(username=username, password=password, deleted_at=0)
if user is None:
return res_json(code='invalid_auth_params')
try:
# Record the current login information
user.login_ip = get_client_ip(request)
user.login_time = int(time.time())
user_data = user.res_format(password=False, token_expires=False, deleted_at=False)
# Refresh the API token
new_token = gen_md5(str(uuid.uuid4()))
UserRepository.update_user_token_by_model(user, new_token)
user_data['api_token'] = genrate_jwt_token(new_token, current_app.config.get("SECRET_KEY"),
current_app.config.get("USER_VALIDITY"))
except Exception as e:
current_app.logger.error(e)
return res_json(code='user_login_fail')
return res_json(data=user_data)
class AuthLogoutResource(ApiResource):
"""
User logout
"""
def post(self):
try:
UserRepository.update_user_token_by_model(g.user, '')
except Exception as e:
current_app.logger.error(e)
return res_json(code='user_logout_fail')
return res_json()
class AuthProfileResource(ApiResource):
"""
Basic user profile information
"""
def get(self):
"""获取认证用户信息"""
return res_json(data=UserRepository.gen_profile_user_data(g.user))
def patch(self):
"""更新认证用户信息"""
self.parser.add_argument('display_name', type=not_empty_string, location='form', required=True)
self.parser.add_argument('old_password', type=str, location='form', trim=True, default='')
self.parser.add_argument('new_password', type=str, location='form', trim=True, default='')
self.parser.add_argument('new_password2', type=str, location='form', trim=True, default='')
self.parser.add_argument('image', type=FileStorage, location='files')
args = self.parse_args()
# Validate the display name format
if not UserRepository.check_display_name(args.get('display_name')):
return res_json(code='invalid_display_name')
# Check whether the password is being changed
if args.get('old_password') != '':
if gen_md5(args.get('old_password')) != g.user.password:
return res_json(code='invalid_old_password')
if args.get('new_password') == '' or args.get('new_password2') == '' or args.get('new_password') != args.get('new_password2'):
return res_json(code='invalid_new_password')
# Validate the new password format
if args.get('new_password') == g.user.username:
return res_json(code='same_username_password')
if not UserRepository.check_password(args.get('new_password')):
return res_json(code='invalid_password')
args['api_token'] = ''  # The password has changed, so the user must log in again
args['new_password'] = gen_md5(args.get('new_password'))
else:
args['api_token'] = g.user.api_token
args['new_password'] = g.user.password
# Check the avatar file extension
# if args.get('image') is not None:
# if not check_image_extension(args.get('image')):
# return res_json(code='invalid_image_extension')
old_image = g.user.image
try:
# Avatar image
args['image'] = save_uploaded_image(current_app, args.get('image'), 'profile_')
UserRepository.update_user_profile_by_model(g.user, args.get('display_name'), args.get('new_password'),
args.get('api_token'), args.get('image'))
except Exception as e:
current_app.logger.error(e)
remove_file_by_link(current_app, args['image'])
return res_json(code='update_profile_fail')
# Remove the old avatar file
if args.get('image') is not None:
remove_file_by_link(current_app, old_image)
# # Add an operation log entry
# UserLogRepository.create_user_log(g.user.id, g.user.id, UserLogModel.TYPE_USER, UserLogModel.ACTION_UPDATE,
# get_client_ip(request))
# Return the updated user profile
user = UserRepository.get_user_by_id(g.user.id)
return res_json(data=UserRepository.gen_profile_user_data(user))
``` |
{
"source": "18600130137/python_order",
"score": 4
} |
#### File: python_order/Select/heap.py
```python
from utils import *
# raw_list=[9,1,2,5,7,4,8,6,3,5]
#[1, 2, 3, 4, 5, 5, 6, 7, 8, 9]
def heap_sort_ajust(raw_list, i, len_raw):
child=2*i+1
while child<len_raw:
if child+1<len_raw and raw_list[child]<raw_list[child+1]:
child+=1
if raw_list[i]<raw_list[child]:
raw_list[i],raw_list[child]=raw_list[child],raw_list[i]
i=child
child=2*i+1
else:
break
@performance
def heap_sort(raw_list):
len_raw=len(raw_list)
for i in range(len_raw//2-1,-1,-1):
heap_sort_ajust(raw_list, i, len_raw)
for i in range(len_raw-1,0,-1):
raw_list[0],raw_list[i]=raw_list[i],raw_list[0]
heap_sort_ajust(raw_list, 0, i)
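# Minimal usage sketch: heap_sort sorts the list in place, and the @performance
# decorator from utils prints the elapsed time of the call.
if __name__ == '__main__':
    data = [9, 1, 2, 5, 7, 4, 8, 6, 3, 5]
    heap_sort(data)
    print(data)  # -> [1, 2, 3, 4, 5, 5, 6, 7, 8, 9]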
```
#### File: python_order/Swap/bubble.py
```python
from utils import *
@performance
def bubble_sort(raw_list):
len_list=len(raw_list)
for i in range(len_list):
for j in range(1,len_list-i):
if raw_list[j]<raw_list[j-1]:
raw_list[j-1],raw_list[j]=raw_list[j],raw_list[j-1]
return raw_list
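# Minimal usage sketch: bubble_sort sorts in place and also returns the list
# (the @performance wrapper passes the return value through).
if __name__ == '__main__':
    print(bubble_sort([5, 2, 9, 1, 7]))  # -> [1, 2, 5, 7, 9]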
```
#### File: python_order/Swap/quick.py
```python
from utils import *
def quick_core(raw_list, low, high):
if low>=high:
return
left=low
right=high
key=raw_list[left]
while left<right:
while left<right and raw_list[right]>=key:
right-=1
raw_list[left]=raw_list[right]
while left<right and raw_list[left]<=key:
left+=1
raw_list[right]=raw_list[left]
raw_list[left]=key
quick_core(raw_list, low, left - 1)
quick_core(raw_list, left + 1, high)
@performance
def quick_sort(raw_list):
len_raw = len(raw_list)
quick_core(raw_list,0,len_raw-1)
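# Minimal usage sketch: quick_sort sorts the list in place via the recursive
# quick_core partitioning above.
if __name__ == '__main__':
    data = [3, 7, 1, 9, 2, 8, 5]
    quick_sort(data)
    print(data)  # -> [1, 2, 3, 5, 7, 8, 9]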
```
#### File: 18600130137/python_order/utils.py
```python
import time
import numpy as np
def performance(f):
def fn(*args,**kw):
t_start=time.time()
r=f(*args,**kw)
t_end=time.time()
print('call %s() in %fs'%(f.__name__,t_end-t_start))
return r
return fn
@performance
def random_array(array_len):
array_len=int(array_len)
return np.random.randint(array_len,size=array_len)
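# Minimal usage sketch: random_array is itself decorated with @performance, so this
# prints the generation time followed by the first few elements.
if __name__ == '__main__':
    arr = random_array(1e5)
    print(arr[:5])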
``` |
{
"source": "18600575648/myems",
"score": 2
} |
#### File: myems-api/core/version.py
```python
import falcon
import simplejson as json
class VersionItem:
@staticmethod
def __init__():
""""Initializes VersionItem"""
pass
@staticmethod
def on_options(req, resp, id_):
resp.status = falcon.HTTP_200
@staticmethod
def on_get(req, resp):
result = {"version": 'MyEMS v1.9.2',
"release-date": '2022-05-22',
"website": "https://myems.io"}
resp.text = json.dumps(result)
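# Routing sketch (assumption: how this resource is typically mounted in the Falcon
# application elsewhere in the project; the names below are illustrative):
#   api = falcon.App()  # falcon.API() on older Falcon releases
#   api.add_route('/version', VersionItem())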
``` |
{
"source": "18621579069/PaddleHub-yu",
"score": 2
} |
#### File: autodl/DELTA/main.py
```python
import os
import time
import sys
import math
import numpy as np
import functools
import re
import logging
import glob
import paddle
import paddle.fluid as fluid
from models.resnet import ResNet101
from datasets.readers import ReaderConfig
# import cv2
# import skimage
# import matplotlib.pyplot as plt
# from paddle.fluid.core import PaddleTensor
# from paddle.fluid.core import AnalysisConfig
# from paddle.fluid.core import create_paddle_predictor
from args import args
from datasets.data_path import global_data_path
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if args.seed is not None:
np.random.seed(args.seed)
print(os.environ.get('LD_LIBRARY_PATH', None))
print(os.environ.get('PATH', None))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
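# Usage sketch for AverageMeter (comment-only so the training script is unchanged):
#   meter = AverageMeter()
#   meter.update(0.5, n=32); meter.update(0.7, n=16)
#   meter.avg  # -> the mean weighted by the 48 samples seen so far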
def load_vars_by_dict(executor, name_var_dict, main_program=None):
from paddle.fluid.framework import Program, Variable
from paddle.fluid import core
load_prog = Program()
load_block = load_prog.global_block()
if main_program is None:
main_program = fluid.default_main_program()
if not isinstance(main_program, Program):
raise TypeError("program should be as Program type or None")
for each_var_name in name_var_dict.keys():
assert isinstance(name_var_dict[each_var_name], Variable)
if name_var_dict[each_var_name].type == core.VarDesc.VarType.RAW:
continue
load_block.append_op(
type='load',
inputs={},
outputs={'Out': [name_var_dict[each_var_name]]},
attrs={'file_path': each_var_name})
executor.run(load_prog)
def get_model_id():
prefix = ''
if args.prefix is not None:
prefix = args.prefix + '-' # for some notes.
model_id = prefix + args.dataset + \
'-epo_' + str(args.num_epoch) + \
'-b_' + str(args.batch_size) + \
'-reg_' + str(args.delta_reg) + \
'-wd_' + str(args.wd_rate)
return model_id
def train():
dataset = args.dataset
image_shape = [3, 224, 224]
pretrained_model = args.pretrained_model
class_map_path = f'{global_data_path}/{dataset}/readable_label.txt'
if os.path.exists(class_map_path):
logger.info(
"The map of readable label and numerical label has been found!")
with open(class_map_path) as f:
label_dict = {}
strinfo = re.compile(r"\d+ ")
for item in f.readlines():
key = int(item.split(" ")[0])
value = [
strinfo.sub("", l).replace("\n", "")
for l in item.split(", ")
]
label_dict[key] = value[0]
assert os.path.isdir(
pretrained_model), "please provide a valid pretrained model path for inference"
# data reader
batch_size = args.batch_size
reader_config = ReaderConfig(f'{global_data_path}/{dataset}', is_test=False)
reader = reader_config.get_reader()
train_reader = paddle.batch(
paddle.reader.shuffle(reader, buf_size=batch_size),
batch_size,
drop_last=True)
# model ops
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
model = ResNet101(is_test=False)
features, logits = model.net(
input=image, class_dim=reader_config.num_classes)
out = fluid.layers.softmax(logits)
# loss, metric
cost = fluid.layers.mean(fluid.layers.cross_entropy(out, label))
accuracy = fluid.layers.accuracy(input=out, label=label)
# delta regularization
# teacher model pre-trained on Imagenet, 1000 classes.
global_name = 't_'
t_model = ResNet101(is_test=True, global_name=global_name)
t_features, _ = t_model.net(input=image, class_dim=1000)
for f in t_features.keys():
t_features[f].stop_gradient = True
# Delta loss. The layer name is hard-coded; it is the feature map just before global pooling.
delta_loss = fluid.layers.square(t_features['t_res5c.add.output.5.tmp_0'] -
features['res5c.add.output.5.tmp_0'])
delta_loss = fluid.layers.reduce_mean(delta_loss)
params = fluid.default_main_program().global_block().all_parameters()
parameters = []
for param in params:
if param.trainable:
if global_name in param.name:
print('\tfixing', param.name)
else:
print('\ttraining', param.name)
parameters.append(param.name)
# optimizer, with piecewise_decay learning rate.
total_steps = len(reader_config.image_paths) * args.num_epoch // batch_size
boundaries = [int(total_steps * 2 / 3)]
print('\ttotal learning steps:', total_steps)
print('\tlr decays at:', boundaries)
values = [0.01, 0.001]
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=boundaries, values=values),
momentum=0.9,
parameter_list=parameters,
regularization=fluid.regularizer.L2Decay(args.wd_rate))
cur_lr = optimizer._global_learning_rate()
optimizer.minimize(
cost + args.delta_reg * delta_loss, parameter_list=parameters)
# data reader
feed_order = ['image', 'label']
# executor (session)
place = fluid.CUDAPlace(
args.use_cuda) if args.use_cuda >= 0 else fluid.CPUPlace()
exe = fluid.Executor(place)
# running
main_program = fluid.default_main_program()
start_program = fluid.default_startup_program()
feed_var_list_loop = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
exe.run(start_program)
loading_parameters = {}
t_loading_parameters = {}
for p in main_program.all_parameters():
if 'fc' not in p.name:
if global_name in p.name:
new_name = os.path.join(pretrained_model,
p.name.split(global_name)[-1])
t_loading_parameters[new_name] = p
print(new_name, p.name)
else:
name = os.path.join(pretrained_model, p.name)
loading_parameters[name] = p
print(name, p.name)
else:
print(f'not loading {p.name}')
load_vars_by_dict(exe, loading_parameters, main_program=main_program)
load_vars_by_dict(exe, t_loading_parameters, main_program=main_program)
step = 0
# test_data = reader_creator_all_in_memory('./datasets/PetImages', is_test=True)
for e_id in range(args.num_epoch):
avg_delta_loss = AverageMeter()
avg_loss = AverageMeter()
avg_accuracy = AverageMeter()
batch_time = AverageMeter()
end = time.time()
for step_id, data_train in enumerate(train_reader()):
wrapped_results = exe.run(
main_program,
feed=feeder.feed(data_train),
fetch_list=[cost, accuracy, delta_loss, cur_lr])
# print(avg_loss_value[2])
batch_time.update(time.time() - end)
end = time.time()
avg_loss.update(wrapped_results[0][0], len(data_train))
avg_accuracy.update(wrapped_results[1][0], len(data_train))
avg_delta_loss.update(wrapped_results[2][0], len(data_train))
if step % 100 == 0:
print(
f"\tEpoch {e_id}, Global_Step {step}, Batch_Time {batch_time.avg: .2f},"
f" LR {wrapped_results[3][0]}, "
f"Loss {avg_loss.avg: .4f}, Acc {avg_accuracy.avg: .4f}, Delta_Loss {avg_delta_loss.avg: .4f}"
)
step += 1
if args.outdir is not None:
try:
os.makedirs(args.outdir, exist_ok=True)
fluid.io.save_params(
executor=exe, dirname=args.outdir + '/' + get_model_id())
except:
print('\t Not saving trained parameters.')
if e_id == args.num_epoch - 1:
print("kpis\ttrain_cost\t%f" % avg_loss.avg)
print("kpis\ttrain_acc\t%f" % avg_accuracy.avg)
def test():
image_shape = [3, 224, 224]
pretrained_model = args.outdir + '/' + get_model_id()
# data reader
batch_size = args.batch_size
reader_config = ReaderConfig(
f'{global_data_path}/{args.dataset}', is_test=True)
reader = reader_config.get_reader()
test_reader = paddle.batch(reader, batch_size)
# model ops
image = fluid.data(
name='image', shape=[None] + image_shape, dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
model = ResNet101(is_test=True)
_, logits = model.net(input=image, class_dim=reader_config.num_classes)
out = fluid.layers.softmax(logits)
# loss, metric
cost = fluid.layers.mean(fluid.layers.cross_entropy(out, label))
accuracy = fluid.layers.accuracy(input=out, label=label)
# data reader
feed_order = ['image', 'label']
# executor (session)
place = fluid.CUDAPlace(
args.use_cuda) if args.use_cuda >= 0 else fluid.CPUPlace()
exe = fluid.Executor(place)
# running
main_program = fluid.default_main_program()
start_program = fluid.default_startup_program()
feed_var_list_loop = [
main_program.global_block().var(var_name) for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
exe.run(start_program)
fluid.io.load_params(exe, pretrained_model)
step = 0
avg_loss = AverageMeter()
avg_accuracy = AverageMeter()
for step_id, data_train in enumerate(test_reader()):
avg_loss_value = exe.run(
main_program,
feed=feeder.feed(data_train),
fetch_list=[cost, accuracy])
avg_loss.update(avg_loss_value[0], len(data_train))
avg_accuracy.update(avg_loss_value[1], len(data_train))
if step_id % 10 == 0:
print("\nBatch %d, Loss %f, Acc %f" % (step_id, avg_loss.avg,
avg_accuracy.avg))
step += 1
print("test counts:", avg_loss.count)
print("test_cost\t%f" % avg_loss.avg)
print("test_acc\t%f" % avg_accuracy.avg)
if __name__ == '__main__':
print(args)
train()
test()
```
#### File: mask_detection/python/infer.py
```python
import os
import sys
import ast
import time
import json
import argparse
import numpy as np
import cv2
import paddle.fluid as fluid
from PIL import Image
from PIL import ImageDraw
import argparse
def parse_args():
parser = argparse.ArgumentParser('mask detection.')
parser.add_argument(
'--models_dir', type=str, default='', help='path of models.')
parser.add_argument(
'--img_paths', type=str, default='', help='path of images')
parser.add_argument(
'--video_path', type=str, default='', help='path of video.')
parser.add_argument(
'--use_camera',
type=bool,
default=False,
help='switch detect video or camera, default:video.')
parser.add_argument(
'--open_imshow',
type=bool,
default=False,
help='visualize video detection results in real time.')
parser.add_argument(
'--use_gpu',
type=bool,
default=False,
help='switch cpu/gpu, default:cpu.')
args = parser.parse_args()
return args
class FaceResult:
def __init__(self, rect_data, rect_info):
self.rect_info = rect_info
self.rect_data = rect_data
self.class_id = -1
self.score = 0.0
def VisualizeResult(im, faces):
LABELS = ['NO_MASK', 'MASK']
COLORS = [(0, 0, 255), (0, 255, 0)]
for face in faces:
label = LABELS[face.class_id]
color = COLORS[face.class_id]
left, right, top, bottom = [int(item) for item in face.rect_info]
label_position = (left, top)
cv2.putText(im, label, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1,
color, 2, cv2.LINE_AA)
cv2.rectangle(im, (left, top), (right, bottom), color, 3)
return im
def LoadModel(model_dir, use_gpu=False):
config = fluid.core.AnalysisConfig(model_dir + '/__model__',
model_dir + '/__params__')
if use_gpu:
config.enable_use_gpu(100, 0)
config.switch_ir_optim(True)
else:
config.disable_gpu()
config.disable_glog_info()
config.switch_specify_input_names(True)
config.enable_memory_optim()
return fluid.core.create_paddle_predictor(config)
class MaskClassifier:
def __init__(self, model_dir, mean, scale, use_gpu=False):
self.mean = np.array(mean).reshape((3, 1, 1))
self.scale = np.array(scale).reshape((3, 1, 1))
self.predictor = LoadModel(model_dir, use_gpu)
self.EVAL_SIZE = (128, 128)
def Preprocess(self, faces):
h, w = self.EVAL_SIZE[1], self.EVAL_SIZE[0]
inputs = []
for face in faces:
im = cv2.resize(
face.rect_data, (128, 128),
fx=0,
fy=0,
interpolation=cv2.INTER_CUBIC)
# HWC -> CHW
im = im.swapaxes(1, 2)
im = im.swapaxes(0, 1)
# Convert to float
im = im[:, :, :].astype('float32') / 256.0
# im = (im - mean) * scale
im = im - self.mean
im = im * self.scale
im = im[np.newaxis, :, :, :]
inputs.append(im)
return inputs
def Postprocess(self, output_data, faces):
argmx = np.argmax(output_data, axis=1)
for idx in range(len(faces)):
faces[idx].class_id = argmx[idx]
faces[idx].score = output_data[idx][argmx[idx]]
return faces
def Predict(self, faces):
inputs = self.Preprocess(faces)
if len(inputs) != 0:
input_data = np.concatenate(inputs)
im_tensor = fluid.core.PaddleTensor(
input_data.copy().astype('float32'))
output_data = self.predictor.run([im_tensor])[0]
output_data = output_data.as_ndarray()
self.Postprocess(output_data, faces)
class FaceDetector:
def __init__(self, model_dir, mean, scale, use_gpu=False, threshold=0.7):
self.mean = np.array(mean).reshape((3, 1, 1))
self.scale = np.array(scale).reshape((3, 1, 1))
self.threshold = threshold
self.predictor = LoadModel(model_dir, use_gpu)
def Preprocess(self, image, shrink):
h, w = int(image.shape[1] * shrink), int(image.shape[0] * shrink)
im = cv2.resize(
image, (h, w), fx=0, fy=0, interpolation=cv2.INTER_CUBIC)
# HWC -> CHW
im = im.swapaxes(1, 2)
im = im.swapaxes(0, 1)
# Convert to float
im = im[:, :, :].astype('float32')
# im = (im - mean) * scale
im = im - self.mean
im = im * self.scale
im = im[np.newaxis, :, :, :]
return im
def Postprocess(self, output_data, ori_im, shrink):
det_out = []
h, w = ori_im.shape[0], ori_im.shape[1]
for out in output_data:
class_id = int(out[0])
score = out[1]
xmin = (out[2] * w)
ymin = (out[3] * h)
xmax = (out[4] * w)
ymax = (out[5] * h)
wd = xmax - xmin
hd = ymax - ymin
valid = (xmax >= xmin and xmin > 0 and ymax >= ymin and ymin > 0)
if score > self.threshold and valid:
roi_rect = ori_im[int(ymin):int(ymax), int(xmin):int(xmax)]
det_out.append(FaceResult(roi_rect, [xmin, xmax, ymin, ymax]))
return det_out
def Predict(self, image, shrink):
ori_im = image.copy()
im = self.Preprocess(image, shrink)
im_tensor = fluid.core.PaddleTensor(im.copy().astype('float32'))
output_data = self.predictor.run([im_tensor])[0]
output_data = output_data.as_ndarray()
return self.Postprocess(output_data, ori_im, shrink)
def predict_images(args):
detector = FaceDetector(
model_dir=args.models_dir + '/pyramidbox_lite/',
mean=[104.0, 177.0, 123.0],
scale=[0.007843, 0.007843, 0.007843],
use_gpu=args.use_gpu,
threshold=0.7)
classifier = MaskClassifier(
model_dir=args.models_dir + '/mask_detector/',
mean=[0.5, 0.5, 0.5],
scale=[1.0, 1.0, 1.0],
use_gpu=args.use_gpu)
names = []
image_paths = []
for name in os.listdir(args.img_paths):
if name.split('.')[-1] in ['jpg', 'png', 'jpeg']:
names.append(name)
image_paths.append(os.path.join(args.img_paths, name))
images = [cv2.imread(path, cv2.IMREAD_COLOR) for path in image_paths]
path = './result'
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
for idx in range(len(images)):
im = images[idx]
det_out = detector.Predict(im, shrink=0.7)
classifier.Predict(det_out)
img = VisualizeResult(im, det_out)
cv2.imwrite(os.path.join(path, names[idx] + '.result.jpg'), img)
def predict_video(args, im_shape=(1920, 1080), use_camera=False):
if args.use_camera:
capture = cv2.VideoCapture(0)
else:
capture = cv2.VideoCapture(args.video_path)
detector = FaceDetector(
model_dir=args.models_dir + '/pyramidbox_lite/',
mean=[104.0, 177.0, 123.0],
scale=[0.007843, 0.007843, 0.007843],
use_gpu=args.use_gpu,
threshold=0.7)
classifier = MaskClassifier(
model_dir=args.models_dir + '/mask_detector/',
mean=[0.5, 0.5, 0.5],
scale=[1.0, 1.0, 1.0],
use_gpu=args.use_gpu)
path = './result'
isExists = os.path.exists(path)
if not isExists:
os.makedirs(path)
fps = 30
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
writer = cv2.VideoWriter(
os.path.join(path, 'result.mp4'), fourcc, fps, (width, height))
import time
start_time = time.time()
index = 0
while (1):
ret, frame = capture.read()
if not ret:
break
print('detect frame:%d' % (index))
index += 1
det_out = detector.Predict(frame, shrink=0.5)
classifier.Predict(det_out)
end_pre = time.time()
im = VisualizeResult(frame, det_out)
writer.write(im)
if args.open_imshow:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
end_time = time.time()
print("Average prediction time per frame:", (end_time - start_time) / index)
writer.release()
if __name__ == "__main__":
args = parse_args()
print(args.models_dir)
if args.img_paths != '':
predict_images(args)
elif args.video_path != '' or args.use_camera:
predict_video(args)
```
#### File: demo/pairwise_text_matching/embedding_pairwise_matching.py
```python
import argparse
import ast
import paddlehub as hub
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epochs for fine-tuning.")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
parser.add_argument("--checkpoint_dir", type=str, default=None, help="Directory to model checkpoint")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest sequence.")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--network", type=str, default=None, help="Pre-defined network which was connected after module.")
args = parser.parse_args()
# yapf: enable.
jieba_paddle = hub.Module(name='jieba_paddle')
def cut(text):
res = jieba_paddle.cut(text, use_paddle=False)
return res
if __name__ == '__main__':
# Load Paddlehub word embedding pretrained model
module = hub.Module(name="word2vec_skipgram")
# module = hub.Module(name="simnet_bow")
# module = hub.Module(name="tencent_ailab_chinese_embedding_small")
# Pairwise task needs: query, title_left, right_title (3 slots)
inputs, outputs, program = module.context(
trainable=True, max_seq_len=args.max_seq_len, num_slots=3)
# Tokenizer tokenizes the text data and encodes the data as model needed.
# If you use transformer modules (ernie, bert, roberta and so on), tokenizer should be hub.BertTokenizer.
# Otherwise, tokenizer should be hub.CustomTokenizer.
# If you choose CustomTokenizer, you can also change the chinese word segmentation tool, for example jieba.
tokenizer = hub.CustomTokenizer(
vocab_file=module.get_vocab_path(),
tokenize_chinese_chars=True,
cut_function=cut, # jieba.cut as cut function
)
dataset = hub.dataset.DuEL(
tokenizer=tokenizer, max_seq_len=args.max_seq_len)
# Construct transfer learning network
# Use token-level output.
query = outputs["emb"]
left = outputs['emb_2']
right = outputs['emb_3']
# Select fine-tune strategy
strategy = hub.DefaultStrategy(
optimizer_name="sgd", learning_rate=args.learning_rate)
# Setup RunConfig for PaddleHub Fine-tune API
config = hub.RunConfig(
eval_interval=300,
use_data_parallel=False,
use_cuda=False,
num_epoch=args.num_epoch,
batch_size=args.batch_size,
checkpoint_dir=args.checkpoint_dir,
strategy=strategy)
# Define a text matching task by PaddleHub's API
# network choice: bow, cnn, gru, lstm (PaddleHub pre-defined network)
matching_task = hub.PairwiseTextMatchingTask(
dataset=dataset,
query_feature=query,
left_feature=left,
right_feature=right,
tokenizer=tokenizer,
network=args.network,
config=config)
# Fine-tune and evaluate by PaddleHub's API
# will finish training, evaluation, testing, save model automatically
matching_task.finetune_and_eval()
```
#### File: demo/text_classification/text_classifier_dygraph.py
```python
import argparse
import os
import numpy as np
import paddlehub as hub
import paddle.fluid as fluid
from paddle.fluid.dygraph import Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.optimizer import AdamOptimizer
# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=1, help="Number of epochs for fine-tuning.")
parser.add_argument("--batch_size", type=int, default=16, help="Total examples' number in batch for training.")
parser.add_argument("--log_interval", type=int, default=10, help="log interval.")
parser.add_argument("--save_interval", type=int, default=10, help="save interval.")
parser.add_argument("--checkpoint_dir", type=str, default="paddlehub_finetune_ckpt_dygraph", help="Path to save log data.")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest sequence.")
# yapf: enable.
class TransformerClassifier(fluid.dygraph.Layer):
def __init__(self, num_classes, transformer):
super(TransformerClassifier, self).__init__()
self.num_classes = num_classes
self.transformer = transformer
self.fc = Linear(input_dim=768, output_dim=num_classes)
def forward(self, input_ids, position_ids, segment_ids, input_mask):
result = self.transformer(input_ids, position_ids, segment_ids,
input_mask)
cls_feats = fluid.layers.dropout(
result['pooled_output'],
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
cls_feats = fluid.layers.reshape(cls_feats, shape=[-1, 768])
pred = self.fc(cls_feats)
return fluid.layers.softmax(pred)
def finetune(args):
module = hub.Module(name="ernie", max_seq_len=args.max_seq_len)
# Use the appropriate tokenizer to preprocess the data set
# For ernie_tiny, it will do word segmentation to get subword. More details: https://www.jiqizhixin.com/articles/2019-11-06-9
if module.name == "ernie_tiny":
tokenizer = hub.ErnieTinyTokenizer(
vocab_file=module.get_vocab_path(),
spm_path=module.get_spm_path(),
word_dict_path=module.get_word_dict_path(),
)
else:
tokenizer = hub.BertTokenizer(vocab_file=module.get_vocab_path())
dataset = hub.dataset.ChnSentiCorp(
tokenizer=tokenizer, max_seq_len=args.max_seq_len)
with fluid.dygraph.guard():
tc = TransformerClassifier(
num_classes=dataset.num_labels, transformer=module)
adam = AdamOptimizer(learning_rate=1e-5, parameter_list=tc.parameters())
state_dict_path = os.path.join(args.checkpoint_dir,
'dygraph_state_dict')
if os.path.exists(state_dict_path + '.pdparams'):
state_dict, _ = fluid.load_dygraph(state_dict_path)
tc.load_dict(state_dict)
loss_sum = acc_sum = cnt = 0
for epoch in range(args.num_epoch):
for batch_id, data in enumerate(
dataset.batch_records_generator(
phase="train",
batch_size=args.batch_size,
shuffle=True,
pad_to_batch_max_seq_len=False)):
batch_size = len(data["input_ids"])
input_ids = np.array(data["input_ids"]).astype(
np.int64).reshape([batch_size, -1, 1])
position_ids = np.array(data["position_ids"]).astype(
np.int64).reshape([batch_size, -1, 1])
segment_ids = np.array(data["segment_ids"]).astype(
np.int64).reshape([batch_size, -1, 1])
input_mask = np.array(data["input_mask"]).astype(
np.float32).reshape([batch_size, -1, 1])
labels = np.array(data["label"]).astype(np.int64).reshape(
[batch_size, 1])
pred = tc(input_ids, position_ids, segment_ids, input_mask)
acc = fluid.layers.accuracy(pred, to_variable(labels))
loss = fluid.layers.cross_entropy(pred, to_variable(labels))
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
loss_sum += avg_loss.numpy() * labels.shape[0]
acc_sum += acc.numpy() * labels.shape[0]
cnt += labels.shape[0]
if batch_id % args.log_interval == 0:
print('epoch {}: loss {}, acc {}'.format(
epoch, loss_sum / cnt, acc_sum / cnt))
loss_sum = acc_sum = cnt = 0
if batch_id % args.save_interval == 0:
state_dict = tc.state_dict()
fluid.save_dygraph(state_dict, state_dict_path)
if __name__ == "__main__":
args = parser.parse_args()
finetune(args)
```
#### File: classification/efficientnetb2_imagenet/efficientnet.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
import math
import copy
import paddle.fluid as fluid
from efficientnetb2_imagenet.layers import conv2d, init_batch_norm_layer, init_fc_layer
__all__ = [
'EfficientNet', 'EfficientNetB0_small', 'EfficientNetB0', 'EfficientNetB1',
'EfficientNetB2', 'EfficientNetB3', 'EfficientNetB4', 'EfficientNetB5',
'EfficientNetB6', 'EfficientNetB7'
]
GlobalParams = collections.namedtuple('GlobalParams', [
'batch_norm_momentum',
'batch_norm_epsilon',
'dropout_rate',
'num_classes',
'width_coefficient',
'depth_coefficient',
'depth_divisor',
'min_depth',
'drop_connect_rate',
])
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'stride', 'se_ratio'
])
GlobalParams.__new__.__defaults__ = (None, ) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None, ) * len(BlockArgs._fields)
def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,resolution,dropout
'efficientnet-b0': (1.0, 1.0, 224, 0.2),
'efficientnet-b1': (1.0, 1.1, 240, 0.2),
'efficientnet-b2': (1.1, 1.2, 260, 0.3),
'efficientnet-b3': (1.2, 1.4, 300, 0.3),
'efficientnet-b4': (1.4, 1.8, 380, 0.4),
'efficientnet-b5': (1.6, 2.2, 456, 0.4),
'efficientnet-b6': (1.8, 2.6, 528, 0.5),
'efficientnet-b7': (2.0, 3.1, 600, 0.5),
}
return params_dict[model_name]
def efficientnet(width_coefficient=None,
depth_coefficient=None,
dropout_rate=0.2,
drop_connect_rate=0.2):
""" Get block arguments according to parameter and coefficients. """
blocks_args = [
'r1_k3_s11_e1_i32_o16_se0.25',
'r2_k3_s22_e6_i16_o24_se0.25',
'r2_k5_s22_e6_i24_o40_se0.25',
'r3_k3_s22_e6_i40_o80_se0.25',
'r3_k5_s11_e6_i80_o112_se0.25',
'r4_k5_s22_e6_i112_o192_se0.25',
'r1_k3_s11_e6_i192_o320_se0.25',
]
blocks_args = BlockDecoder.decode(blocks_args)
global_params = GlobalParams(
batch_norm_momentum=0.99,
batch_norm_epsilon=1e-3,
dropout_rate=dropout_rate,
drop_connect_rate=drop_connect_rate,
num_classes=1000,
width_coefficient=width_coefficient,
depth_coefficient=depth_coefficient,
depth_divisor=8,
min_depth=None)
return blocks_args, global_params
def get_model_params(model_name, override_params):
""" Get the block args and global params for a given model """
if model_name.startswith('efficientnet'):
w, d, _, p = efficientnet_params(model_name)
blocks_args, global_params = efficientnet(
width_coefficient=w, depth_coefficient=d, dropout_rate=p)
else:
raise NotImplementedError(
'model name is not pre-defined: %s' % model_name)
if override_params:
global_params = global_params._replace(**override_params)
return blocks_args, global_params
def round_filters(filters, global_params):
""" Calculate and round number of filters based on depth multiplier. """
multiplier = global_params.width_coefficient
if not multiplier:
return filters
divisor = global_params.depth_divisor
min_depth = global_params.min_depth
filters *= multiplier
min_depth = min_depth or divisor
new_filters = max(min_depth,
int(filters + divisor / 2) // divisor * divisor)
if new_filters < 0.9 * filters: # prevent rounding by more than 10%
new_filters += divisor
return int(new_filters)
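# Worked example: with the EfficientNet-B2 coefficients (width_coefficient=1.1,
# depth_divisor=8), round_filters(40, params) scales 40 to 44 and snaps to the nearest
# multiple of 8, giving 48, while round_filters(32, params) rounds back down to 32
# because 32 is still within 10% of the scaled value 35.2.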
def round_repeats(repeats, global_params):
""" Round number of filters based on depth multiplier. """
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
return int(math.ceil(multiplier * repeats))
class EfficientNet():
def __init__(self,
name='b0',
padding_type='SAME',
override_params=None,
is_test=False,
use_se=True):
valid_names = ['b' + str(i) for i in range(8)]
assert name in valid_names, 'efficient name should be in b0~b7'
model_name = 'efficientnet-' + name
self._blocks_args, self._global_params = get_model_params(
model_name, override_params)
self._bn_mom = self._global_params.batch_norm_momentum
self._bn_eps = self._global_params.batch_norm_epsilon
self.padding_type = padding_type
self.use_se = use_se
def net(self, input, class_dim=1000, is_test=False):
conv = self.extract_features(input, is_test=is_test)
out_channels = round_filters(1280, self._global_params)
conv = self.conv_bn_layer(
conv,
num_filters=out_channels,
filter_size=1,
bn_act='swish',
bn_mom=self._bn_mom,
bn_eps=self._bn_eps,
padding_type=self.padding_type,
name='',
conv_name='_conv_head',
bn_name='_bn1')
pool = fluid.layers.pool2d(
input=conv, pool_type='avg', global_pooling=True, use_cudnn=False)
if not is_test and self._global_params.dropout_rate:
pool = fluid.layers.dropout(
pool,
self._global_params.dropout_rate,
dropout_implementation='upscale_in_train')
param_attr, bias_attr = init_fc_layer(class_dim, '_fc')
out = fluid.layers.fc(
pool,
class_dim,
name='_fc',
param_attr=param_attr,
bias_attr=bias_attr)
return out, pool
def _drop_connect(self, inputs, prob, is_test):
if is_test:
return inputs
keep_prob = 1.0 - prob
random_tensor = keep_prob + fluid.layers.uniform_random_batch_size_like(
inputs, [-1, 1, 1, 1], min=0., max=1.)
binary_tensor = fluid.layers.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
def _expand_conv_norm(self, inputs, block_args, is_test, name=None):
# Expansion phase
oup = block_args.input_filters * block_args.expand_ratio # number of output channels
if block_args.expand_ratio != 1:
conv = self.conv_bn_layer(
inputs,
num_filters=oup,
filter_size=1,
bn_act=None,
bn_mom=self._bn_mom,
bn_eps=self._bn_eps,
padding_type=self.padding_type,
name=name,
conv_name=name + '_expand_conv',
bn_name='_bn0')
return conv
def _depthwise_conv_norm(self, inputs, block_args, is_test, name=None):
k = block_args.kernel_size
s = block_args.stride
if isinstance(s, list) or isinstance(s, tuple):
s = s[0]
oup = block_args.input_filters * block_args.expand_ratio # number of output channels
conv = self.conv_bn_layer(
inputs,
num_filters=oup,
filter_size=k,
stride=s,
num_groups=oup,
bn_act=None,
padding_type=self.padding_type,
bn_mom=self._bn_mom,
bn_eps=self._bn_eps,
name=name,
use_cudnn=False,
conv_name=name + '_depthwise_conv',
bn_name='_bn1')
return conv
def _project_conv_norm(self, inputs, block_args, is_test, name=None):
final_oup = block_args.output_filters
conv = self.conv_bn_layer(
inputs,
num_filters=final_oup,
filter_size=1,
bn_act=None,
padding_type=self.padding_type,
bn_mom=self._bn_mom,
bn_eps=self._bn_eps,
name=name,
conv_name=name + '_project_conv',
bn_name='_bn2')
return conv
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride=1,
num_groups=1,
padding_type="SAME",
conv_act=None,
bn_act='swish',
use_cudnn=True,
use_bn=True,
bn_mom=0.9,
bn_eps=1e-05,
use_bias=False,
name=None,
conv_name=None,
bn_name=None):
conv = conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
groups=num_groups,
act=conv_act,
padding_type=padding_type,
use_cudnn=use_cudnn,
name=conv_name,
use_bias=use_bias)
        if not use_bn:
return conv
else:
bn_name = name + bn_name
param_attr, bias_attr = init_batch_norm_layer(bn_name)
return fluid.layers.batch_norm(
input=conv,
act=bn_act,
momentum=bn_mom,
epsilon=bn_eps,
name=bn_name,
moving_mean_name=bn_name + '_mean',
moving_variance_name=bn_name + '_variance',
param_attr=param_attr,
bias_attr=bias_attr)
def _conv_stem_norm(self, inputs, is_test):
out_channels = round_filters(32, self._global_params)
bn = self.conv_bn_layer(
inputs,
num_filters=out_channels,
filter_size=3,
stride=2,
bn_act=None,
bn_mom=self._bn_mom,
padding_type=self.padding_type,
bn_eps=self._bn_eps,
name='',
conv_name='_conv_stem',
bn_name='_bn0')
return bn
def mb_conv_block(self,
inputs,
block_args,
is_test=False,
drop_connect_rate=None,
name=None):
# Expansion and Depthwise Convolution
oup = block_args.input_filters * block_args.expand_ratio # number of output channels
has_se = self.use_se and (block_args.se_ratio is
not None) and (0 < block_args.se_ratio <= 1)
id_skip = block_args.id_skip # skip connection and drop connect
conv = inputs
if block_args.expand_ratio != 1:
conv = fluid.layers.swish(
self._expand_conv_norm(conv, block_args, is_test, name))
conv = fluid.layers.swish(
self._depthwise_conv_norm(conv, block_args, is_test, name))
# Squeeze and Excitation
if has_se:
num_squeezed_channels = max(
1, int(block_args.input_filters * block_args.se_ratio))
conv = self.se_block(conv, num_squeezed_channels, oup, name)
conv = self._project_conv_norm(conv, block_args, is_test, name)
# Skip connection and drop connect
input_filters, output_filters = block_args.input_filters, block_args.output_filters
if id_skip and block_args.stride == 1 and input_filters == output_filters:
if drop_connect_rate:
conv = self._drop_connect(conv, drop_connect_rate, is_test)
conv = fluid.layers.elementwise_add(conv, inputs)
return conv
def se_block(self, inputs, num_squeezed_channels, oup, name):
x_squeezed = fluid.layers.pool2d(
input=inputs, pool_type='avg', global_pooling=True, use_cudnn=False)
x_squeezed = conv2d(
x_squeezed,
num_filters=num_squeezed_channels,
filter_size=1,
use_bias=True,
padding_type=self.padding_type,
act='swish',
name=name + '_se_reduce')
x_squeezed = conv2d(
x_squeezed,
num_filters=oup,
filter_size=1,
use_bias=True,
padding_type=self.padding_type,
name=name + '_se_expand')
se_out = inputs * fluid.layers.sigmoid(x_squeezed)
return se_out
def extract_features(self, inputs, is_test):
""" Returns output of the final convolution layer """
conv = fluid.layers.swish(self._conv_stem_norm(inputs, is_test=is_test))
block_args_copy = copy.deepcopy(self._blocks_args)
idx = 0
block_size = 0
for block_arg in block_args_copy:
block_arg = block_arg._replace(
input_filters=round_filters(block_arg.input_filters,
self._global_params),
output_filters=round_filters(block_arg.output_filters,
self._global_params),
num_repeat=round_repeats(block_arg.num_repeat,
self._global_params))
block_size += 1
for _ in range(block_arg.num_repeat - 1):
block_size += 1
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters,
self._global_params),
output_filters=round_filters(block_args.output_filters,
self._global_params),
num_repeat=round_repeats(block_args.num_repeat,
self._global_params))
# The first block needs to take care of stride and filter size increase.
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / block_size
conv = self.mb_conv_block(conv, block_args, is_test,
drop_connect_rate,
'_blocks.' + str(idx) + '.')
idx += 1
if block_args.num_repeat > 1:
block_args = block_args._replace(
input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / block_size
conv = self.mb_conv_block(conv, block_args, is_test,
drop_connect_rate,
'_blocks.' + str(idx) + '.')
idx += 1
return conv
def shortcut(self, input, data_residual):
return fluid.layers.elementwise_add(input, data_residual)
class BlockDecoder(object):
""" Block Decoder for readability, straight from the official TensorFlow repository """
@staticmethod
def _decode_block_string(block_string):
""" Gets a block through a string notation of arguments. """
assert isinstance(block_string, str)
ops = block_string.split('_')
options = {}
for op in ops:
splits = re.split(r'(\d.*)', op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
assert (('s' in options and len(options['s']) == 1) or
(len(options['s']) == 2 and options['s'][0] == options['s'][1]))
return BlockArgs(
kernel_size=int(options['k']),
num_repeat=int(options['r']),
input_filters=int(options['i']),
output_filters=int(options['o']),
expand_ratio=int(options['e']),
id_skip=('noskip' not in block_string),
se_ratio=float(options['se']) if 'se' in options else None,
stride=[int(options['s'][0])])
@staticmethod
def _encode_block_string(block):
"""Encodes a block to a string."""
args = [
'r%d' % block.num_repeat,
'k%d' % block.kernel_size,
            's%d%d' % (block.stride[0], block.stride[-1]),
'e%s' % block.expand_ratio,
'i%d' % block.input_filters,
'o%d' % block.output_filters
]
if 0 < block.se_ratio <= 1:
args.append('se%s' % block.se_ratio)
if block.id_skip is False:
args.append('noskip')
return '_'.join(args)
@staticmethod
def decode(string_list):
"""
Decodes a list of string notations to specify blocks inside the network.
:param string_list: a list of strings, each string is a notation of block
:return: a list of BlockArgs namedtuples of block args
"""
assert isinstance(string_list, list)
blocks_args = []
for block_string in string_list:
blocks_args.append(BlockDecoder._decode_block_string(block_string))
return blocks_args
@staticmethod
def encode(blocks_args):
"""
Encodes a list of BlockArgs to a list of strings.
:param blocks_args: a list of BlockArgs namedtuples of block args
:return: a list of strings, each string is a notation of block
"""
block_strings = []
for block in blocks_args:
block_strings.append(BlockDecoder._encode_block_string(block))
return block_strings
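# A minimal illustrative sketch of the block-string notation consumed by BlockDecoder:
# 'r2_k5_s22_e6_i24_o40_se0.25' means repeat=2, kernel=5, stride=2x2, expand_ratio=6,
# input_filters=24, output_filters=40, se_ratio=0.25. It assumes the BlockArgs
# namedtuple defined earlier in this module.
def _demo_block_decoder():
    args = BlockDecoder.decode(['r2_k5_s22_e6_i24_o40_se0.25'])[0]
    assert args.num_repeat == 2 and args.kernel_size == 5 and args.stride == [2]
    assert args.expand_ratio == 6 and args.input_filters == 24 and args.output_filters == 40
    assert args.se_ratio == 0.25 and args.id_skip is True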
def EfficientNetB0_small(is_test=False,
padding_type='SAME',
override_params=None,
use_se=False):
model = EfficientNet(
name='b0',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB0(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b0',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB1(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b1',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB2(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b2',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB3(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b3',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB4(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b4',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB5(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b5',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB6(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b6',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
def EfficientNetB7(is_test=False,
padding_type='SAME',
override_params=None,
use_se=True):
model = EfficientNet(
name='b7',
is_test=is_test,
padding_type=padding_type,
override_params=override_params,
use_se=use_se)
return model
```
#### File: classification/resnet50_vd_10w/module.py
```python
from __future__ import absolute_import
from __future__ import division
import ast
import argparse
import os
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from paddlehub.common.paddle_helper import add_vars_prefix
from resnet50_vd_10w.processor import postprocess, base64_to_cv2
from resnet50_vd_10w.data_feed import reader
from resnet50_vd_10w.resnet_vd import ResNet50_vd
@moduleinfo(
name="resnet50_vd_10w",
type="CV/image_classification",
author="paddlepaddle",
author_email="<EMAIL>",
summary=
"ResNet50vd is a image classfication model, this module is trained with Baidu's self-built dataset with 100,000 categories.",
version="1.0.0")
class ResNet50vd(hub.Module):
def _initialize(self):
self.default_pretrained_model_path = os.path.join(
self.directory, "model")
def get_expected_image_width(self):
return 224
def get_expected_image_height(self):
return 224
def get_pretrained_images_mean(self):
im_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3)
return im_mean
def get_pretrained_images_std(self):
im_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3)
return im_std
def context(self, trainable=True, pretrained=True):
"""context for transfer learning.
Args:
trainable (bool): Set parameters in program to be trainable.
pretrained (bool) : Whether to load pretrained model.
Returns:
            inputs (dict): key is 'image', corresponding value is the image tensor.
outputs (dict): key is 'feature_map', corresponding value is the result of the layer before the fully connected layer.
context_prog (fluid.Program): program for transfer learning.
"""
context_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(context_prog, startup_prog):
with fluid.unique_name.guard():
image = fluid.layers.data(
name="image", shape=[3, 224, 224], dtype="float32")
resnet_vd = ResNet50_vd()
feature_map = resnet_vd.net(input=image)
name_prefix = '@HUB_{}@'.format(self.name)
inputs = {'image': name_prefix + image.name}
outputs = {'feature_map': name_prefix + feature_map.name}
add_vars_prefix(context_prog, name_prefix)
add_vars_prefix(startup_prog, name_prefix)
global_vars = context_prog.global_block().vars
inputs = {
key: global_vars[value]
for key, value in inputs.items()
}
outputs = {
key: global_vars[value]
for key, value in outputs.items()
}
place = fluid.CPUPlace()
exe = fluid.Executor(place)
# pretrained
if pretrained:
def _if_exist(var):
b = os.path.exists(
os.path.join(self.default_pretrained_model_path,
var.name))
return b
fluid.io.load_vars(
exe,
self.default_pretrained_model_path,
context_prog,
predicate=_if_exist)
else:
exe.run(startup_prog)
# trainable
for param in context_prog.global_block().iter_parameters():
param.trainable = trainable
return inputs, outputs, context_prog
def save_inference_model(self,
dirname,
model_filename=None,
params_filename=None,
combined=True):
if combined:
model_filename = "__model__" if not model_filename else model_filename
params_filename = "__params__" if not params_filename else params_filename
place = fluid.CPUPlace()
exe = fluid.Executor(place)
program, feeded_var_names, target_vars = fluid.io.load_inference_model(
dirname=self.default_pretrained_model_path, executor=exe)
fluid.io.save_inference_model(
dirname=dirname,
main_program=program,
executor=exe,
feeded_var_names=feeded_var_names,
target_vars=target_vars,
model_filename=model_filename,
params_filename=params_filename)
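# A minimal usage sketch of how this module is meant to be consumed through PaddleHub
# for transfer learning. The names follow the context() docstring above; running it
# requires PaddleHub and the locally downloaded pretrained weights.
#
#   import paddlehub as hub
#   module = hub.Module(name="resnet50_vd_10w")
#   inputs, outputs, program = module.context(trainable=True, pretrained=True)
#   image_holder = inputs['image']         # [3, 224, 224] float32 input tensor
#   feature_map = outputs['feature_map']   # output of the layer before the FC head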
```
#### File: face_detection/pyramidbox_face_detection/processor.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from collections import OrderedDict
import base64
import cv2
import numpy as np
from PIL import Image, ImageDraw
__all__ = ['base64_to_cv2', 'postprocess']
def base64_to_cv2(b64str):
data = base64.b64decode(b64str.encode('utf8'))
    data = np.frombuffer(data, np.uint8)
data = cv2.imdecode(data, cv2.IMREAD_COLOR)
return data
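# A minimal sketch showing a round trip of a tiny synthetic image through base64 and
# back with base64_to_cv2; it uses only the cv2/np/base64 imports already present above.
def _demo_base64_to_cv2():
    img = np.zeros((4, 4, 3), dtype=np.uint8)
    ok, buf = cv2.imencode('.png', img)
    b64str = base64.b64encode(buf.tobytes()).decode('utf8')
    assert base64_to_cv2(b64str).shape == (4, 4, 3)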
def check_dir(dir_path):
"""
Create directory to save processed image.
Args:
dir_path (str): directory path to save images.
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
elif os.path.isfile(dir_path):
os.remove(dir_path)
os.makedirs(dir_path)
def get_save_image_name(img, org_im_path, output_dir):
"""
Get save image name.
"""
# name prefix of original image
org_im_name = os.path.split(org_im_path)[-1]
im_prefix = os.path.splitext(org_im_name)[0]
# extension
if img.mode == 'RGBA':
ext = '.png'
else:
ext = '.jpg'
# save image path
save_im_path = os.path.join(output_dir, im_prefix + ext)
if os.path.exists(save_im_path):
save_im_path = os.path.join(
output_dir, im_prefix + 'time={}'.format(int(time.time())) + ext)
return save_im_path
def draw_bboxes(image, bboxes, org_im_path, output_dir):
"""
Draw bounding boxes on image.
Args:
bboxes (np.array): bounding boxes.
"""
draw = ImageDraw.Draw(image)
for i in range(len(bboxes)):
xmin, ymin, xmax, ymax = bboxes[i]
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=4,
fill='red')
save_name = get_save_image_name(image, org_im_path, output_dir)
image.save(save_name)
def postprocess(data_out, org_im, org_im_path, org_im_width, org_im_height,
output_dir, visualization, score_thresh):
"""
    Postprocess the network output, one image at a time.
Args:
data_out (numpy.ndarray): output of network.
org_im: (PIL.Image object): original image.
org_im_path (str): path of original image.
org_im_width (int): width of original image.
org_im_height (int): height of original image.
output_dir (str): output directory to store image.
visualization (bool): whether to save image or not.
Returns:
        output (dict): keys are 'data' and 'path', the corresponding values are:
            data (list[dict]): each element has 5 keys, where
                'left', 'top', 'right', 'bottom' are the coordinates of the detection bounding box,
                'confidence' is the confidence of this bbox.
path (str): The path of original image.
"""
output = dict()
output['data'] = list()
output['path'] = org_im_path
if data_out.shape[0] == 0:
print("No face detected in {}".format(org_im_path))
else:
det_conf = data_out[:, 1]
det_xmin = org_im_width * data_out[:, 2]
det_ymin = org_im_height * data_out[:, 3]
det_xmax = org_im_width * data_out[:, 4]
det_ymax = org_im_height * data_out[:, 5]
dets = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax,
det_conf))
keep_index = np.where(dets[:, 4] >= score_thresh)[0]
dets = dets[keep_index, :]
if dets.shape[0] == 0:
print("No face detected in {}".format(org_im_path))
else:
for detect_face in dets:
dt_i = dict()
dt_i['left'] = float(detect_face[0])
dt_i['top'] = float(detect_face[1])
dt_i['right'] = float(detect_face[2])
dt_i['bottom'] = float(detect_face[3])
dt_i['confidence'] = float(detect_face[4])
output['data'].append(dt_i)
if visualization:
check_dir(output_dir)
draw_bboxes(org_im, dets[:, 0:4], org_im_path, output_dir)
return output
```
#### File: lexical_analysis/lac/ahocorasick.py
```python
class Node(object):
"""AC自动机的树结点.
Attributes:
next: dict类型,指向子结点
fail: Node类型,AC自动机的fail指针
length: int类型,判断节点是否为单词
"""
__slots__ = ['next', 'fail', 'length']
def __init__(self):
"""初始化空节点."""
self.next = {}
        self.fail = None  # the fail pointer defaults to None
self.length = -1
class Ahocorasick(object):
"""实现AC自动机的类
Attributes:
__root: Node类型,AC自动机根节点
"""
def __init__(self):
"""初始化Ahocorasick的根节点__root"""
self.__root = Node()
def add_word(self, word):
"""添加单词word到Trie树中"""
current = self.__root
for char in word:
current = current.next.setdefault(char, Node())
current.length = len(word)
def make(self):
"""构建fail指针路径"""
queue = list()
for key in self.__root.next:
self.__root.next[key].fail = self.__root
queue.append(self.__root.next[key])
        # set the fail pointers with a breadth-first traversal
while len(queue) > 0:
            # set the fail pointers of the children based on the current node's fail pointer
current = queue.pop(0)
for k in current.next:
current_fail = current.fail
                # if the current node has a fail pointer, try to set its child's fail pointer from it
while current_fail is not None:
if k in current_fail.next:
current.next[k].fail = current_fail.next[k]
break
current_fail = current_fail.fail
                # if no node along the fail chain has this child, point the child's fail pointer to the root
if current_fail is None:
current.next[k].fail = self.__root
queue.append(current.next[k])
def search(self, content):
"""后向最大匹配.
对content的文本进行多模匹配,返回后向最大匹配的结果.
Args:
content: string类型, 用于多模匹配的字符串
Returns:
list类型, 最大匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如:
[(0, 2), [4, 7]]
"""
result = []
p = self.__root
for current_position in range(len(content)):
word = content[current_position]
#
while word not in p.next:
if p == self.__root:
break
p = p.fail
else:
p = p.next[word]
if p.length > 0:
result.append((current_position - p.length + 1,
current_position))
return result
def search_all(self, content):
"""多模匹配的完全匹配.
对content的文本进行多模匹配,返回所有匹配结果
Args:
content: string类型, 用于多模匹配的字符串
Returns:
list类型, 所有匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如:
[(0, 2), [4, 7]]
"""
result = []
p = self.__root
for current_position in range(len(content)):
word = content[current_position]
while word not in p.next:
if p == self.__root:
break
p = p.fail
else:
p = p.next[word]
            # backtrack along fail pointers to collect every word ending at the current character
tmp = p
while tmp != self.__root:
if tmp.length > 0:
result.append((current_position - tmp.length + 1,
current_position))
tmp = tmp.fail
return result
if __name__ == '__main__':
ah = Ahocorasick()
x = ["百度", "家", "高科技", "科技", "科技公司"]
for i in x:
ah.add_word(i)
ah.make()
string = '百度是家高科技公司'
for begin, end in ah.search_all(string):
print('all:', string[begin:end + 1])
for begin, end in ah.search(string):
print('search:', string[begin:end + 1])
```
#### File: semantic_model/simnet_bow/processor.py
```python
import io
def load_vocab(file_path):
"""
load the given vocabulary
"""
vocab = {}
with io.open(file_path, 'r', encoding='utf8') as f:
wid = 0
for line in f:
line = line.rstrip()
parts = line.split('\t')
vocab[parts[0]] = int(parts[1])
vocab["<unk>"] = len(vocab)
return vocab
text_a_key = "text_1"
text_b_key = "text_2"
def preprocess(lac, word_dict, data_dict, use_gpu=False, batch_size=1):
"""
Convert the word str to word id and pad the text
"""
result = {text_a_key: [], text_b_key: []}
processed_a = lac.lexical_analysis(
data={'text': data_dict[text_a_key]},
use_gpu=use_gpu,
batch_size=batch_size)
processed_b = lac.lexical_analysis(
data={'text': data_dict[text_b_key]}, use_gpu=use_gpu)
unk_id = word_dict['<unk>']
for index, (text_a, text_b) in enumerate(zip(processed_a, processed_b)):
result_i = {'processed': []}
result_i['origin'] = data_dict[text_a_key][index]
for word in text_a['word']:
_index = word_dict.get(word, unk_id)
result_i['processed'].append(_index)
result[text_a_key].append(result_i)
result_i = {'processed': []}
result_i['origin'] = data_dict[text_b_key][index]
for word in text_b['word']:
_index = word_dict.get(word, unk_id)
result_i['processed'].append(_index)
result[text_b_key].append(result_i)
return result
def postprocess(predict_out, data_info):
"""
    Convert the model's output tensor to similarity results
"""
result = []
pred = predict_out.as_ndarray()
for index in range(len(data_info[text_a_key])):
result_i = {}
result_i[text_a_key] = data_info[text_a_key][index]['origin']
result_i[text_b_key] = data_info[text_b_key][index]['origin']
result_i['similarity'] = float('%.4f' % pred[index][0])
result.append(result_i)
return result
```
#### File: tests/unittests/test_chinese_ocr_db_crnn_server.py
```python
import os
from unittest import TestCase, main
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import paddlehub as hub
class ChineseOCRDBCRNNTestCase(TestCase):
def setUp(self):
self.module = hub.Module(name='chinese_ocr_db_crnn_server')
self.test_images = [
"../image_dataset/text_recognition/11.jpg",
"../image_dataset/text_recognition/test_image.jpg"
]
def test_detect_text(self):
results_1 = self.module.recognize_text(
paths=self.test_images, use_gpu=True)
results_2 = self.module.recognize_text(
paths=self.test_images, use_gpu=False)
test_images = [cv2.imread(img) for img in self.test_images]
results_3 = self.module.recognize_text(
images=test_images, use_gpu=False)
for i, res in enumerate(results_1):
self.assertEqual(res['save_path'], '')
for j, item in enumerate(res['data']):
self.assertEqual(item['confidence'],
results_2[i]['data'][j]['confidence'])
self.assertEqual(item['confidence'],
results_3[i]['data'][j]['confidence'])
self.assertEqual(item['text'], results_2[i]['data'][j]['text'])
self.assertEqual(item['text'], results_3[i]['data'][j]['text'])
self.assertEqual(item['text_box_position'],
results_2[i]['data'][j]['text_box_position'])
self.assertEqual(item['text_box_position'],
results_3[i]['data'][j]['text_box_position'])
if __name__ == '__main__':
main()
```
#### File: tests/unittests/test_efficientnetb4_imagenet.py
```python
import os
from unittest import TestCase, main
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import cv2
import numpy as np
import paddlehub as hub
class EfficientNetB4TestCase(TestCase):
def setUp(self):
self.module = hub.Module(name='efficientnetb4_imagenet')
self.test_images = [
"../image_dataset/classification/animals/dog.jpeg",
"../image_dataset/keypoint_detection/girl2.jpg"
]
self.true_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3).tolist()
self.true_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3).tolist()
def test_classifcation(self):
results_1 = self.module.classify(paths=self.test_images, use_gpu=True)
results_2 = self.module.classify(paths=self.test_images, use_gpu=False)
for index, res in enumerate(results_1):
            self.assertEqual(res.keys(), results_2[index].keys())
diff = list(res.values())[0] - list(results_2[index].values())[0]
self.assertTrue((diff < 1e-5))
test_images = [cv2.imread(img) for img in self.test_images]
results_3 = self.module.classify(images=test_images, use_gpu=False)
for index, res in enumerate(results_1):
            self.assertEqual(res.keys(), results_3[index].keys())
results_4 = self.module.classify(
images=test_images, use_gpu=True, top_k=2)
for res in results_4:
self.assertEqual(len(res.keys()), 2)
def test_common_apis(self):
width = self.module.get_expected_image_width()
height = self.module.get_expected_image_height()
mean = self.module.get_pretrained_images_mean()
std = self.module.get_pretrained_images_std()
self.assertEqual(width, 224)
self.assertEqual(height, 224)
self.assertEqual(mean.tolist(), self.true_mean)
self.assertEqual(std.tolist(), self.true_std)
if __name__ == '__main__':
main()
```
#### File: tests/unittests/test_ernie_skep_sentiment_analysis.py
```python
import os
from unittest import TestCase, main
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import numpy as np
import paddlehub as hub
class ErnieSkepSentimentAnalysisTestCase(TestCase):
def setUp(self):
self.module = hub.Module(name='ernie_skep_sentiment_analysis')
self.test_text = [[
'飞桨(PaddlePaddle)是国内开源产业级深度学习平台', 'PaddleHub是飞桨生态的预训练模型应用工具'
], ["飞浆PaddleHub"]]
self.test_data = ['你不是不聪明,而是不认真', '虽然小明很努力,但是他还是没有考100分']
self.results = [{
'text': '你不是不聪明,而是不认真',
'sentiment_label': 'negative',
'positive_probs': 0.10738213360309601,
'negative_probs': 0.8926178216934204
},
{
'text': '虽然小明很努力,但是他还是没有考100分',
'sentiment_label': 'negative',
'positive_probs': 0.053915347903966904,
'negative_probs': 0.9460846185684204
}]
def test_predict_sentiment(self):
results_1 = self.module.predict_sentiment(self.test_data, use_gpu=False)
results_2 = self.module.predict_sentiment(self.test_data, use_gpu=True)
for index, res in enumerate(results_1):
self.assertEqual(res['text'], self.results[index]['text'])
self.assertEqual(res['sentiment_label'],
self.results[index]['sentiment_label'])
self.assertTrue(
abs(res['positive_probs'] -
self.results[index]['positive_probs']) < 1e-6)
self.assertTrue(
abs(res['negative_probs'] -
self.results[index]['negative_probs']) < 1e-6)
self.assertEqual(res['text'], results_2[index]['text'])
self.assertEqual(res['sentiment_label'],
results_2[index]['sentiment_label'])
self.assertTrue(
abs(res['positive_probs'] -
results_2[index]['positive_probs']) < 1e-6)
self.assertTrue(
abs(res['negative_probs'] -
results_2[index]['negative_probs']) < 1e-6)
def test_get_embedding(self):
# test batch_size
max_seq_len = 128
results = self.module.get_embedding(
texts=self.test_text,
use_gpu=False,
batch_size=1,
max_seq_len=max_seq_len)
results_2 = self.module.get_embedding(
texts=self.test_text,
use_gpu=False,
batch_size=10,
max_seq_len=max_seq_len)
# 2 sample results
self.assertEqual(len(results), 2)
self.assertEqual(len(results_2), 2)
# sequence embedding and token embedding results per sample
self.assertEqual(len(results[0]), 2)
self.assertEqual(len(results_2[0]), 2)
# sequence embedding shape
self.assertEqual(results[0][0].shape, (1024, ))
self.assertEqual(results_2[0][0].shape, (1024, ))
# token embedding shape
self.assertEqual(results[0][1].shape, (max_seq_len, 1024))
self.assertEqual(results_2[0][1].shape, (max_seq_len, 1024))
# test gpu
results_3 = self.module.get_embedding(
texts=self.test_text,
use_gpu=True,
batch_size=1,
max_seq_len=max_seq_len)
        diff = np.abs(results[0][0] - results_3[0][0])
        self.assertTrue((diff < 1e-6).all())
        diff = np.abs(results[0][1] - results_3[0][1])
        self.assertTrue((diff < 1e-6).all())
        diff = np.abs(results[1][0] - results_3[1][0])
        self.assertTrue((diff < 1e-6).all())
        diff = np.abs(results[1][1] - results_3[1][1])
        self.assertTrue((diff < 1e-6).all())
def test_get_params_layer(self):
self.module.context()
layers = self.module.get_params_layer()
layers = list(set(layers.values()))
true_layers = [i for i in range(24)]
self.assertEqual(layers, true_layers)
def test_get_spm_path(self):
self.assertEqual(self.module.get_spm_path(), None)
def test_get_word_dict_path(self):
self.assertEqual(self.module.get_word_dict_path(), None)
def test_get_vocab_path(self):
vocab_path = self.module.get_vocab_path()
true_vocab_path = os.path.join(self.module.directory, "assets",
"ernie_1.0_large_ch.vocab.txt")
self.assertEqual(vocab_path, true_vocab_path)
if __name__ == '__main__':
main()
```
#### File: tests/unittests/test_pyramidbox_lite_server_mask.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import unittest
import cv2
import paddle.fluid as fluid
import paddlehub as hub
pic_dir = '../image_dataset/face_detection/'
class TestPyramidBoxLiteServerMask(unittest.TestCase):
@classmethod
def setUpClass(self):
"""Prepare the environment once before execution of all tests.\n"""
self.mask_detector = hub.Module(name="pyramidbox_lite_server_mask")
@classmethod
def tearDownClass(self):
"""clean up the environment after the execution of all tests.\n"""
self.mask_detector = None
def setUp(self):
"Call setUp() to prepare environment\n"
self.test_prog = fluid.Program()
def tearDown(self):
"Call tearDown to restore environment.\n"
self.test_prog = None
def test_single_pic(self):
with fluid.program_guard(self.test_prog):
paths_list = [os.path.join(pic_dir, f) for f in os.listdir(pic_dir)]
print('\n')
for pic_path in paths_list:
print(pic_path)
result = self.mask_detector.face_detection(
paths=[pic_path],
use_gpu=True,
visualization=True,
use_multi_scale=True,
shrink=0.5,
confs_threshold=0.6)
print(result)
def test_batch(self):
with fluid.program_guard(self.test_prog):
paths_list = [os.path.join(pic_dir, f) for f in os.listdir(pic_dir)]
result = self.mask_detector.face_detection(
paths=paths_list,
batch_size=5,
use_gpu=True,
visualization=True,
output_dir='batch_out',
use_multi_scale=True,
shrink=0.5,
confs_threshold=0.6)
print(result)
def test_ndarray(self):
with fluid.program_guard(self.test_prog):
paths_list = [os.path.join(pic_dir, f) for f in os.listdir(pic_dir)]
pics_ndarray = list()
im_list = list()
for pic_path in paths_list:
im = cv2.imread(pic_path)
im_list.append(im)
result = self.mask_detector.face_detection(
images=im_list,
output_dir='ndarray_output',
shrink=1,
confs_threshold=0.6,
use_gpu=True,
visualization=True)
print(result)
def test_save_inference_model(self):
with fluid.program_guard(self.test_prog):
self.mask_detector.save_inference_model(
dirname='pyramidbox_lite_server_mask_model',
model_filename='model',
combined=True)
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(TestPyramidBoxLiteServerMask('test_single_pic'))
suite.addTest(TestPyramidBoxLiteServerMask('test_batch'))
suite.addTest(TestPyramidBoxLiteServerMask('test_ndarray'))
suite.addTest(TestPyramidBoxLiteServerMask('test_save_inference_model'))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
```
#### File: tests/unittests/test_stylepro_artistic.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import unittest
import cv2
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
content_dir = '../image_dataset/style_tranfer/content/'
style_dir = '../image_dataset/style_tranfer/style/'
class TestStyleProjection(unittest.TestCase):
@classmethod
def setUpClass(self):
"""Prepare the environment once before execution of all tests.\n"""
self.style_projection = hub.Module(name="stylepro_artistic")
@classmethod
def tearDownClass(self):
"""clean up the environment after the execution of all tests.\n"""
self.style_projection = None
def setUp(self):
"Call setUp() to prepare environment\n"
self.test_prog = fluid.Program()
def tearDown(self):
"Call tearDown to restore environment.\n"
self.test_prog = None
def test_single_style(self):
with fluid.program_guard(self.test_prog):
content_paths = [
os.path.join(content_dir, f) for f in os.listdir(content_dir)
]
style_paths = [
os.path.join(style_dir, f) for f in os.listdir(style_dir)
]
for style_path in style_paths:
t1 = time.time()
self.style_projection.style_transfer(
paths=[{
'content': content_paths[0],
'styles': [style_path]
}],
alpha=0.8,
use_gpu=True)
t2 = time.time()
print('\nCost time: {}'.format(t2 - t1))
def test_multiple_styles(self):
with fluid.program_guard(self.test_prog):
content_path = os.path.join(content_dir, 'chicago.jpg')
style_paths = [
os.path.join(style_dir, f) for f in os.listdir(style_dir)
]
for j in range(len(style_paths) - 1):
res = self.style_projection.style_transfer(
paths=[{
'content': content_path,
'styles': [style_paths[j], style_paths[j + 1]],
'weights': [1, 2]
}],
alpha=0.8,
use_gpu=True,
visualization=True)
print('#' * 100)
print(res)
print('#' * 100)
def test_input_ndarray(self):
with fluid.program_guard(self.test_prog):
content_arr = cv2.imread(os.path.join(content_dir, 'chicago.jpg'))
content_arr = cv2.cvtColor(content_arr, cv2.COLOR_BGR2RGB)
style_arrs_BGR = [
cv2.imread(os.path.join(style_dir, f))
for f in os.listdir(style_dir)
]
style_arrs_list = [
cv2.cvtColor(arr, cv2.COLOR_BGR2RGB) for arr in style_arrs_BGR
]
for j in range(len(style_arrs_list) - 1):
self.style_projection.style_transfer(
images=[{
'content':
content_arr,
'styles': [style_arrs_list[j], style_arrs_list[j + 1]]
}],
alpha=0.8,
use_gpu=True,
output_dir='transfer_out',
visualization=True)
def test_save_inference_model(self):
with fluid.program_guard(self.test_prog):
self.style_projection.save_inference_model(
dirname='stylepro_artistic',
model_filename='model',
combined=True)
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(TestStyleProjection('test_single_style'))
suite.addTest(TestStyleProjection('test_multiple_styles'))
suite.addTest(TestStyleProjection('test_input_ndarray'))
suite.addTest(TestStyleProjection('test_save_inference_model'))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
```
#### File: tests/unittests/test_yolov3_darknet53_pedestrian.py
```python
import os
import unittest
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
image_dir = '../image_dataset/face_detection/'
class TestYOLOv3DarkNet53Pedestrian(unittest.TestCase):
@classmethod
def setUpClass(self):
"""Prepare the environment once before execution of all tests."""
self.yolov3_pedestrian_detect = hub.Module(
name="yolov3_darknet53_pedestrian")
@classmethod
def tearDownClass(self):
"""clean up the environment after the execution of all tests."""
self.yolov3_pedestrian_detect = None
def setUp(self):
        "Call setUp() to prepare environment\n"
        self.test_prog = fluid.Program()
def tearDown(self):
"Call tearDown to restore environment.\n"
self.test_prog = None
def test_context(self):
with fluid.program_guard(self.test_prog):
get_prediction = True
inputs, outputs, program = self.yolov3_pedestrian_detect.context(
pretrained=True, trainable=True, get_prediction=get_prediction)
image = inputs["image"]
im_size = inputs["im_size"]
if get_prediction:
bbox_out = outputs['bbox_out']
else:
head_features = outputs['head_features']
def test_object_detection(self):
with fluid.program_guard(self.test_prog):
paths = list()
for file_path in os.listdir(image_dir):
paths.append(os.path.join(image_dir, file_path))
detection_results = self.yolov3_pedestrian_detect.object_detection(
paths=paths, batch_size=3, visualization=True)
print(detection_results)
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(TestYOLOv3DarkNet53Pedestrian('test_object_detection'))
suite.addTest(TestYOLOv3DarkNet53Pedestrian('test_context'))
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
```
#### File: paddlehub/commands/autofinetune.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import shutil
from paddlehub.commands.base_command import BaseCommand, ENTRY
from paddlehub.autofinetune.autoft import PSHE2
from paddlehub.autofinetune.autoft import HAZero
from paddlehub.autofinetune.evaluator import FullTrailEvaluator
from paddlehub.autofinetune.evaluator import PopulationBasedEvaluator
from paddlehub.common.hub_server import CacheUpdater
class AutoFineTuneCommand(BaseCommand):
name = "autofinetune"
def __init__(self, name):
super(AutoFineTuneCommand, self).__init__(name)
self.show_in_help = True
self.name = name
self.description = "PaddleHub helps to finetune a task by searching hyperparameters automatically."
self.parser = argparse.ArgumentParser(
description=self.__class__.__doc__,
            prog='%s %s <task to be finetuned in python script>' % (ENTRY,
self.name),
usage='%(prog)s',
add_help=False)
self.module = None
def add_params_file_arg(self):
self.arg_params_to_be_searched_group.add_argument(
"--param_file",
type=str,
default=None,
required=True,
help=
"Hyperparameters to be searched in the yaml format. The number of hyperparameters searched must be greater than 1."
)
def add_autoft_config_arg(self):
self.arg_config_group.add_argument(
"--popsize", type=int, default=5, help="Population size")
self.arg_config_group.add_argument(
"--gpu",
type=str,
default="0",
required=True,
help="The list of gpu devices to be used")
self.arg_config_group.add_argument(
"--round", type=int, default=10, help="Number of searches")
self.arg_config_group.add_argument(
"--output_dir",
type=str,
default=None,
help="Directory to model checkpoint")
self.arg_config_group.add_argument(
"--evaluator",
type=str,
default="populationbased",
help="Choices: fulltrail or populationbased.")
self.arg_config_group.add_argument(
"--tuning_strategy",
type=str,
default="pshe2",
help="Choices: HAZero or PSHE2.")
self.arg_config_group.add_argument(
'opts',
help='See utils/config.py for all options',
default=None,
nargs=argparse.REMAINDER)
def convert_to_other_options(self, config_list):
if len(config_list) % 2 != 0:
raise ValueError(
"Command for finetuned task options config format error! Please check it: {}"
.format(config_list))
options_str = ""
for key, value in zip(config_list[0::2], config_list[1::2]):
options_str += "--" + key + "=" + value + " "
return options_str
def execute(self, argv):
CacheUpdater("hub_autofinetune").start()
if not argv:
print("ERROR: Please specify a script to be finetuned in python.\n")
self.help()
return False
self.fintunee_script = argv[0]
self.parser.prog = '%s %s %s' % (ENTRY, self.name, self.fintunee_script)
self.arg_params_to_be_searched_group = self.parser.add_argument_group(
title="Input options",
description="Hyperparameters to be searched.")
self.arg_config_group = self.parser.add_argument_group(
title="Autofinetune config options",
description=
"Autofintune configuration for controlling autofinetune behavior, not required"
)
self.arg_finetuned_task_group = self.parser.add_argument_group(
title="Finetuned task config options",
description=
"Finetuned task configuration for controlling finetuned task behavior, not required"
)
self.add_params_file_arg()
self.add_autoft_config_arg()
if not argv[1:]:
self.help()
return False
self.args = self.parser.parse_args(argv[1:])
options_str = ""
if self.args.opts is not None:
options_str = self.convert_to_other_options(self.args.opts)
device_ids = self.args.gpu.strip().split(",")
device_ids = [int(device_id) for device_id in device_ids]
if self.args.evaluator.lower() == "fulltrail":
evaluator = FullTrailEvaluator(
self.args.param_file,
self.fintunee_script,
options_str=options_str)
elif self.args.evaluator.lower() == "populationbased":
evaluator = PopulationBasedEvaluator(
self.args.param_file,
self.fintunee_script,
options_str=options_str)
else:
raise ValueError(
"The evaluate %s is not defined!" % self.args.evaluator)
if self.args.tuning_strategy.lower() == "hazero":
autoft = HAZero(
evaluator,
cudas=device_ids,
popsize=self.args.popsize,
output_dir=self.args.output_dir)
elif self.args.tuning_strategy.lower() == "pshe2":
autoft = PSHE2(
evaluator,
cudas=device_ids,
popsize=self.args.popsize,
output_dir=self.args.output_dir)
else:
raise ValueError("The tuning strategy %s is not defined!" %
self.args.tuning_strategy)
run_round_cnt = 0
solutions_modeldirs = {}
print("PaddleHub Autofinetune starts.")
while (not autoft.is_stop()) and run_round_cnt < self.args.round:
print("PaddleHub Autofinetune starts round at %s." % run_round_cnt)
output_dir = autoft._output_dir + "/round" + str(run_round_cnt)
res = autoft.step(output_dir)
solutions_modeldirs.update(res)
evaluator.new_round()
run_round_cnt = run_round_cnt + 1
print("PaddleHub Autofinetune ends.")
best_hparams_origin = autoft.get_best_hparams()
best_hparams_origin = autoft.mpi.bcast(best_hparams_origin)
with open(autoft._output_dir + "/log_file.txt", "w") as f:
best_hparams = evaluator.convert_params(best_hparams_origin)
print("The final best hyperparameters:")
f.write("The final best hyperparameters:\n")
for index, hparam_name in enumerate(autoft.hparams_name_list):
print("%s=%s" % (hparam_name, best_hparams[index]))
f.write(hparam_name + "\t:\t" + str(best_hparams[index]) + "\n")
best_hparams_dir, best_hparams_rank = solutions_modeldirs[tuple(
best_hparams_origin)]
print("The final best eval score is %s." %
autoft.get_best_eval_value())
if autoft.mpi.multi_machine:
print("The final best model parameters are saved as " +
autoft._output_dir + "/best_model on rank " +
str(best_hparams_rank) + " .")
else:
print("The final best model parameters are saved as " +
autoft._output_dir + "/best_model .")
f.write("The final best eval score is %s.\n" %
autoft.get_best_eval_value())
best_model_dir = autoft._output_dir + "/best_model"
if autoft.mpi.rank == best_hparams_rank:
shutil.copytree(best_hparams_dir, best_model_dir)
if autoft.mpi.multi_machine:
f.write(
"The final best model parameters are saved as ./best_model on rank " \
+ str(best_hparams_rank) + " .")
f.write("\t".join(autoft.hparams_name_list) +
"\tsaved_params_dir\trank\n")
else:
f.write(
"The final best model parameters are saved as ./best_model ."
)
f.write("\t".join(autoft.hparams_name_list) +
"\tsaved_params_dir\n")
print(
"The related information about hyperparamemters searched are saved as %s/log_file.txt ."
% autoft._output_dir)
for solution, modeldir in solutions_modeldirs.items():
param = evaluator.convert_params(solution)
param = [str(p) for p in param]
if autoft.mpi.multi_machine:
f.write("\t".join(param) + "\t" + modeldir[0] + "\t" +
str(modeldir[1]) + "\n")
else:
f.write("\t".join(param) + "\t" + modeldir[0] + "\n")
return True
command = AutoFineTuneCommand.instance()
```
#### File: paddlehub/finetune/evaluate.py
```python
import collections
import math
import numpy as np
# Sequence label evaluation functions
def chunk_eval(np_labels, np_infers, np_lens, tag_num, dev_count=1):
def extract_bio_chunk(seq):
chunks = []
cur_chunk = None
null_index = tag_num - 1
for index in range(len(seq)):
tag = seq[index]
tag_type = tag // 2
tag_pos = tag % 2
if tag == null_index:
if cur_chunk is not None:
chunks.append(cur_chunk)
cur_chunk = None
continue
if tag_pos == 0:
if cur_chunk is not None:
chunks.append(cur_chunk)
cur_chunk = {}
cur_chunk = {"st": index, "en": index + 1, "type": tag_type}
else:
if cur_chunk is None:
cur_chunk = {"st": index, "en": index + 1, "type": tag_type}
continue
if cur_chunk["type"] == tag_type:
cur_chunk["en"] = index + 1
else:
chunks.append(cur_chunk)
cur_chunk = {"st": index, "en": index + 1, "type": tag_type}
if cur_chunk is not None:
chunks.append(cur_chunk)
return chunks
null_index = tag_num - 1
num_label = 0
num_infer = 0
num_correct = 0
labels = np_labels.reshape([-1]).astype(np.int32).tolist()
infers = np_infers.reshape([-1]).astype(np.int32).tolist()
all_lens = np_lens.reshape([dev_count, -1]).astype(np.int32).tolist()
base_index = 0
for dev_index in range(dev_count):
lens = all_lens[dev_index]
max_len = 0
for l in lens:
max_len = max(max_len, l)
for i in range(len(lens)):
seq_st = base_index + i * max_len + 1
seq_en = seq_st + (lens[i] - 2)
infer_chunks = extract_bio_chunk(infers[seq_st:seq_en])
label_chunks = extract_bio_chunk(labels[seq_st:seq_en])
num_infer += len(infer_chunks)
num_label += len(label_chunks)
infer_index = 0
label_index = 0
while label_index < len(label_chunks) \
and infer_index < len(infer_chunks):
if infer_chunks[infer_index]["st"] \
< label_chunks[label_index]["st"]:
infer_index += 1
elif infer_chunks[infer_index]["st"] \
> label_chunks[label_index]["st"]:
label_index += 1
else:
if infer_chunks[infer_index]["en"] \
== label_chunks[label_index]["en"] \
and infer_chunks[infer_index]["type"] \
== label_chunks[label_index]["type"]:
num_correct += 1
infer_index += 1
label_index += 1
base_index += max_len * len(lens)
return num_label, num_infer, num_correct
def calculate_f1(num_label, num_infer, num_correct):
if num_infer == 0:
precision = 0.0
else:
precision = num_correct * 1.0 / num_infer
if num_label == 0:
recall = 0.0
else:
recall = num_correct * 1.0 / num_label
if num_correct == 0:
f1 = 0.0
else:
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
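# A minimal illustrative sketch: chunk-level precision/recall/F1 from raw counts, as
# computed by calculate_f1; the counts below are hypothetical.
def _demo_calculate_f1():
    p, r, f1 = calculate_f1(num_label=10, num_infer=8, num_correct=6)
    assert abs(p - 0.75) < 1e-9 and abs(r - 0.6) < 1e-9
    assert abs(f1 - 2 * p * r / (p + r)) < 1e-9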
def calculate_f1_np(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
tp = np.sum((labels == 1) & (preds == 1))
tn = np.sum((labels == 0) & (preds == 0))
fp = np.sum((labels == 0) & (preds == 1))
fn = np.sum((labels == 1) & (preds == 0))
p = tp / (tp + fp) if (tp + fp) else 0
r = tp / (tp + fn) if (tp + fn) else 0
f1 = (2 * p * r) / (p + r) if p + r else 0
return p, r, f1
def matthews_corrcoef(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
tp = np.sum((labels == 1) & (preds == 1))
tn = np.sum((labels == 0) & (preds == 0))
fp = np.sum((labels == 0) & (preds == 1))
fn = np.sum((labels == 1) & (preds == 0))
div = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
mcc = ((tp * tn) - (fp * fn)) / np.sqrt(div) if div else 0
return mcc
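# A minimal illustrative sketch of the Matthews correlation coefficient on a tiny
# hypothetical prediction set: tp=2, tn=2, fp=1, fn=1 gives (2*2 - 1*1) / sqrt(81) = 1/3.
def _demo_matthews_corrcoef():
    preds = [1, 1, 0, 0, 1, 0]
    labels = [1, 0, 0, 0, 1, 1]
    assert abs(matthews_corrcoef(preds, labels) - 1.0 / 3.0) < 1e-9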
def recall_nk(data, n, k, m):
"""
    This metric can be used to evaluate whether the model can find the correct response B for question A.
    Note: it only applies when each question A has exactly one correct response B1.
    Parameters
    ----------
    data: list. Each element is a tuple consisting of the predicted positive probability of the sample and its label.
          For each example, the only true positive sample should be the first tuple.
    n: int. The number of candidates per example.
       e.g. [A,B1,1], [A,B2,0], [A,B3,0]: n=3 as there are 3 candidates for example A.
    k: int. If the true positive is ranked within the top k, the example is considered correct.
       e.g. [A,B1,1]=0.5, [A,B2,0]=0.8, [A,B3,0]=0.3 (probability of label 1)
       If k=2, the prediction for example A is considered correct as 0.5 is within the top-2 probabilities.
       If k=1, the prediction is considered wrong as 0.5 is not the highest probability.
    m: int. For every m examples, there is exactly one positive sample.
       e.g. data = [A1,B1,1], [A1,B2,0], [A1,B3,0], [A2,B1,1], [A2,B2,0], [A2,B3,0]
       For every 3 examples there is one positive sample, so m=3, and n can be 1, 2 or 3.
"""
def get_p_at_n_in_m(data, n, k, ind):
"""
calculate precision in recall n
"""
pos_score = data[ind][0]
curr = data[ind:ind + n]
curr = sorted(curr, key=lambda x: x[0], reverse=True)
if curr[k - 1][0] <= pos_score:
return 1
return 0
correct_num = 0.0
length = len(data) // m
for i in range(0, length):
ind = i * m
assert data[ind][1] == 1
correct_num += get_p_at_n_in_m(data, n, k, ind)
return correct_num / length
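# A minimal illustrative sketch of recall_nk on two hypothetical questions with three
# candidate responses each (the positive response listed first, as the docstring requires).
def _demo_recall_nk():
    data = [(0.5, 1), (0.8, 0), (0.3, 0),   # question 1
            (0.9, 1), (0.2, 0), (0.1, 0)]   # question 2
    assert recall_nk(data, n=3, k=1, m=3) == 0.5  # only question 2 ranks its positive top-1
    assert recall_nk(data, n=3, k=2, m=3) == 1.0  # both positives fall within the top-2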
def simple_accuracy(preds, labels):
preds = np.array(preds)
labels = np.array(labels)
return (preds == labels).mean()
def _get_ngrams(segment, max_order):
"""
    Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
        The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
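# A minimal illustrative sketch: counting n-grams up to order 2 for a tiny token list.
def _demo_get_ngrams():
    counts = _get_ngrams(['the', 'cat', 'sat'], max_order=2)
    assert counts[('the',)] == 1 and counts[('cat', 'sat')] == 1
    assert sum(counts.values()) == 5  # three 1-grams + two 2-grams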
def compute_bleu(reference_corpus,
translation_corpus,
max_order=4,
smooth=False):
"""
Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (reference, translation) in zip(reference_corpus, translation_corpus):
reference_length += len(reference)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for order in range(1, max_order + 1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order - 1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (
float(matches_by_order[i]) / possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
elif ratio > 0.0:
bp = math.exp(1 - 1. / ratio)
else:
bp = 0
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
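# A minimal illustrative sketch: this implementation expects one tokenized reference per
# translation; identical sentences score BLEU = 1.0 with brevity penalty 1.0.
def _demo_compute_bleu():
    reference_corpus = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    translation_corpus = [['the', 'cat', 'sat', 'on', 'the', 'mat']]
    bleu, precisions, bp, ratio, _, _ = compute_bleu(reference_corpus, translation_corpus)
    assert abs(precisions[0] - 1.0) < 1e-9
    assert abs(bleu - 1.0) < 1e-9 and abs(bp - 1.0) < 1e-9 and abs(ratio - 1.0) < 1e-9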
```
#### File: finetune/evaluator/cmrc2018_evaluate.py
```python
from __future__ import print_function
from collections import OrderedDict
import os
import re
import nltk
from paddlehub.common.dir import THIRD_PARTY_HOME
from paddlehub.common.downloader import default_downloader
_PUNKT_URL = "https://paddlehub.bj.bcebos.com/paddlehub-thirdparty/punkt.tar.gz"
# segment text that mixes Chinese and English
def mixed_segmentation(in_str, rm_punc=False):
nltk_path = os.path.join(THIRD_PARTY_HOME, "nltk_data")
tokenizers_path = os.path.join(nltk_path, "tokenizers")
punkt_path = os.path.join(tokenizers_path, "punkt")
if not os.path.exists(punkt_path):
default_downloader.download_file_and_uncompress(
url=_PUNKT_URL, save_path=tokenizers_path, print_progress=True)
nltk.data.path.append(nltk_path)
in_str = str(in_str).lower().strip()
segs_out = []
temp_str = ""
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':',
'?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(',
')', '-', '~', '『', '』'
]
for char in in_str:
if rm_punc and char in sp_char:
continue
if re.search(r'[\u4e00-\u9fa5]', char) or char in sp_char:
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
# handling last part
if temp_str != "":
ss = nltk.word_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
# remove punctuation
def remove_punctuation(in_str):
in_str = str(in_str).lower().strip()
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。', ':',
'?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「', '」', '(',
')', '-', '~', '『', '』'
]
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
# find longest common string
def find_lcs(s1, s2):
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > mmax:
mmax = m[i + 1][j + 1]
p = i + 1
return s1[p - mmax:p], mmax
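# A minimal illustrative sketch: find_lcs works on any two sequences (character strings
# here, token lists in calc_f1_score) and returns the longest common substring and its length.
def _demo_find_lcs():
    lcs, length = find_lcs('机器学习模型', '深度学习模型')
    assert lcs == '学习模型' and length == 4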
def evaluate(ground_truth_file, prediction_file):
f1 = 0
em = 0
total_count = 0
skip_count = 0
for instance in ground_truth_file:
# context_id = instance['context_id'].strip()
# context_text = instance['context_text'].strip()
for para in instance["paragraphs"]:
for qas in para['qas']:
total_count += 1
query_id = qas['id'].strip()
query_text = qas['question'].strip()
answers = [x["text"] for x in qas['answers']]
if query_id not in prediction_file:
print('Unanswered question: {}\n'.format(query_id))
skip_count += 1
continue
prediction = str(prediction_file[query_id])
f1 += calc_f1_score(answers, prediction)
em += calc_em_score(answers, prediction)
f1_score = 100.0 * f1 / total_count
em_score = 100.0 * em / total_count
return f1_score, em_score, total_count, skip_count
def calc_f1_score(answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = mixed_segmentation(ans, rm_punc=True)
prediction_segs = mixed_segmentation(prediction, rm_punc=True)
lcs, lcs_len = find_lcs(ans_segs, prediction_segs)
if lcs_len == 0:
f1_scores.append(0)
continue
precision = 1.0 * lcs_len / len(prediction_segs)
recall = 1.0 * lcs_len / len(ans_segs)
f1 = (2 * precision * recall) / (precision + recall)
f1_scores.append(f1)
return max(f1_scores)
def calc_em_score(answers, prediction):
em = 0
for ans in answers:
ans_ = remove_punctuation(ans)
prediction_ = remove_punctuation(prediction)
if ans_ == prediction_:
em = 1
break
return em
def get_eval(original_file, prediction_file):
F1, EM, TOTAL, SKIP = evaluate(original_file, prediction_file)
AVG = (EM + F1) * 0.5
output_result = OrderedDict()
output_result['AVERAGE'] = AVG
output_result['F1'] = F1
output_result['EM'] = EM
output_result['TOTAL'] = TOTAL
output_result['SKIP'] = SKIP
return output_result
```
#### File: paddlehub/network/classification.py
```python
import paddle
import paddle.fluid as fluid
def bilstm(token_embeddings, hid_dim=128, hid_dim2=96):
"""
BiLSTM network.
"""
fc0 = fluid.layers.fc(input=token_embeddings, size=hid_dim * 4)
rfc0 = fluid.layers.fc(input=token_embeddings, size=hid_dim * 4)
lstm_h, c = fluid.layers.dynamic_lstm(
input=fc0, size=hid_dim * 4, is_reverse=False)
rlstm_h, c = fluid.layers.dynamic_lstm(
input=rfc0, size=hid_dim * 4, is_reverse=True)
lstm_last = fluid.layers.sequence_last_step(input=lstm_h)
rlstm_last = fluid.layers.sequence_last_step(input=rlstm_h)
lstm_last_tanh = fluid.layers.tanh(lstm_last)
rlstm_last_tanh = fluid.layers.tanh(rlstm_last)
# concat layer
lstm_concat = fluid.layers.concat(input=[lstm_last, rlstm_last], axis=1)
# full connect layer
fc = fluid.layers.fc(input=lstm_concat, size=hid_dim2, act='tanh')
return fc
def bow(token_embeddings, hid_dim=128, hid_dim2=96):
"""
BOW network.
"""
# bow layer
bow = fluid.layers.sequence_pool(input=token_embeddings, pool_type='sum')
bow_tanh = fluid.layers.tanh(bow)
# full connect layer
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
return fc_2
def cnn(token_embeddings, hid_dim=128, win_size=3):
"""
CNN network.
"""
# cnn layer
conv = fluid.nets.sequence_conv_pool(
input=token_embeddings,
num_filters=hid_dim,
filter_size=win_size,
act="tanh",
pool_type="max")
# full connect layer
fc_1 = fluid.layers.fc(input=conv, size=hid_dim)
return fc_1
def dpcnn(token_embeddings,
hid_dim=128,
channel_size=250,
emb_dim=1024,
blocks=6):
"""
    Deep Pyramid Convolutional Neural Network (DPCNN) for text classification, as described in the ACL 2017 paper 'Deep Pyramid Convolutional Neural Networks for Text Categorization'.
For more information, please refer to https://www.aclweb.org/anthology/P17-1052.pdf.
"""
def _block(x):
x = fluid.layers.relu(x)
x = fluid.layers.conv2d(x, channel_size, (3, 1), padding=(1, 0))
x = fluid.layers.relu(x)
x = fluid.layers.conv2d(x, channel_size, (3, 1), padding=(1, 0))
return x
emb = fluid.layers.unsqueeze(token_embeddings, axes=[1])
region_embedding = fluid.layers.conv2d(
emb, channel_size, (3, emb_dim), padding=(1, 0))
conv_features = _block(region_embedding)
conv_features = conv_features + region_embedding
# multi-cnn layer
for i in range(blocks):
block_features = fluid.layers.pool2d(
conv_features,
pool_size=(3, 1),
pool_stride=(2, 1),
pool_padding=(1, 0))
conv_features = _block(block_features)
conv_features = block_features + conv_features
features = fluid.layers.pool2d(conv_features, global_pooling=True)
features = fluid.layers.squeeze(features, axes=[2, 3])
# full connect layer
fc_1 = fluid.layers.fc(input=features, size=hid_dim, act="tanh")
return fc_1
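# A minimal usage sketch (names and shapes are illustrative only): dpcnn() expects padded
# token embeddings of shape [batch, max_seq_len, emb_dim], with emb_dim matching the
# emb_dim argument, and returns a sentence-level feature to feed a classification head.
#
#   # token_embeddings: e.g. the sequence output of a text-embedding module
#   sent_feat = dpcnn(token_embeddings, hid_dim=128, channel_size=250, emb_dim=1024)
#   logits = fluid.layers.fc(input=sent_feat, size=num_classes, act='softmax')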
def gru(token_embeddings, hid_dim=128, hid_dim2=96):
"""
GRU network.
"""
fc0 = fluid.layers.fc(input=token_embeddings, size=hid_dim * 3)
gru_h = fluid.layers.dynamic_gru(input=fc0, size=hid_dim, is_reverse=False)
gru_max = fluid.layers.sequence_pool(input=gru_h, pool_type='max')
gru_max_tanh = fluid.layers.tanh(gru_max)
fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh')
return fc1
def lstm(token_embeddings, hid_dim=128, hid_dim2=96):
"""
LSTM network.
"""
# lstm layer
fc0 = fluid.layers.fc(input=token_embeddings, size=hid_dim * 4)
lstm_h, c = fluid.layers.dynamic_lstm(
input=fc0, size=hid_dim * 4, is_reverse=False)
# max pooling layer
lstm_max = fluid.layers.sequence_pool(input=lstm_h, pool_type='max')
lstm_max_tanh = fluid.layers.tanh(lstm_max)
# full connect layer
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
return fc1
```
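All of these heads take a batch of token embeddings and reduce it to a fixed-size feature vector, so they can be swapped behind the same embedding layer. A minimal wiring sketch, assuming the PaddlePaddle 1.x static-graph (`fluid`) API and hypothetical vocabulary/class sizes:
```python
import paddle.fluid as fluid

VOCAB_SIZE, EMB_DIM, NUM_CLASSES = 10000, 128, 2  # hypothetical sizes

# sequence of word ids and the class label
words = fluid.layers.data(name='words', shape=[1], dtype='int64', lod_level=1)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# token embeddings feed any of the heads defined above
emb = fluid.layers.embedding(input=words, size=[VOCAB_SIZE, EMB_DIM])
feature = bow(emb)  # or bilstm(emb), cnn(emb), gru(emb), lstm(emb)

# classifier on top of the pooled feature
prediction = fluid.layers.fc(input=feature, size=NUM_CLASSES, act='softmax')
loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
```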
#### File: paddlehub/tokenizer/tokenizer_util.py
```python
from collections import OrderedDict
import unicodedata
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = {}
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n").split("\t")[0]
vocab[token] = index
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (
cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def is_chinese_char(char):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
cp = ord(char)
if ((cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
``` |
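These helpers mirror the character classification used by BERT-style tokenizers. A quick illustration of their behavior (a standalone sketch, assuming the functions above are in scope):
```python
print(whitespace_tokenize(" Hello,\tworld! 你好 "))    # ['Hello,', 'world!', '你好']
print([c for c in "Hi, 你好!" if is_punctuation(c)])   # [',', '!']
print(is_chinese_char("你"), is_chinese_char("A"))     # True False
print(is_control("\n"), is_control("\x00"))            # False True
```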
{
"source": "18633277619/YIBAN01",
"score": 3
} |
#### File: 18633277619/YIBAN01/main.py
```python
# !!!!!!!!!!!!!!!!!!!! IMPORTANT NOTICE !!!!!!!!!!!!!!!!!!!!
# If you develop symptoms such as fever, dry cough, chills, general discomfort, chest pain, nasal congestion, runny nose, nausea or diarrhea,
# stop using this project (yibanAutocheakin) immediately, fulfil your social obligations responsibly and report your health status promptly.
# !!!!!!!!!!!!!!!!!!!! IMPORTANT NOTICE !!!!!!!!!!!!!!!!!!!!
# In case of copyright infringement, please provide the relevant proof of ownership and the related files will be removed upon receipt.
# Anyone who views, copies or uses any script in this project (yibanAutocheakin) in any way should read this statement carefully. The author reserves the right to change or supplement this disclaimer at any time.
# Once you use or copy any related script of this project (yibanAutocheakin), you are deemed to have accepted this disclaimer.
# Using or copying any related script of this project (yibanAutocheakin), or any script made by the author, is deemed acceptance of this disclaimer. Please read it carefully.
# Requirements: Python 3.6.4 and the requests library.
# Usage: once the configuration is complete and correct, run the .py file via a server shell script, a Windows scheduled task, or manually every day.
# Running it at 00:00 each day is recommended.
# Function: performs the Yiban check-in sign-in automatically; the interval between sign-ins is controlled by the value inside the jiange() function.
# !!! NOTE !!!
# Every comment in this code matters; follow the comments and it will work correctly (as of January 11, 2022).
# !!! NOTE !!!
import requests # the requests library must be installed separately; look up the installation steps if needed
import os
import json
import time
from threading import Thread
def jiange():
    time.sleep(10800) # the value in () is the number of seconds to wait before signing in again after a successful check-in
class KSClient(object):
    # As tested by the developer, sign-in requires a captcha that is very hard to recognize; ordinary image-processing approaches rarely read it correctly.
    # The developer even trained a recognition model with a convolutional neural network on 100,000 captchas, yet the success rate was still limited.
    # So a recognition platform is used instead; it offers 20 free recognitions per day, which is more than enough for personal use.
    # Platform address: http://fast.net885.com/ - click "register now" at the bottom left, then fill in the username and password in the places below; the platform is very cheap.
    # !!! There is also one username/password pair in the main sign-in function - do not forget to fill it in !!!
    # !!!!!!!!! This is not an advertisement for the recognition platform !!!!!!!!!!!
def __init__(self):
        self.username = 'username' # fill in your username between the quotes
        self.Token = '' # leave this alone
self.headers = {
'Connection': 'Keep-Alive',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0)',
}
    # get the token
    def GetTaken(self, username, password): # fill in (self, username, password) in that order
brtn = False
r = requests.get('http://api.95man.com:8888/api/Http/UserTaken?user=' + username + '&pwd=' + password + '&isref=0',
                         headers=self.headers) # fill in the username after the first plus sign and the password after the third plus sign
arrstr = r.text.split('|')
if (arrstr[0] == '1'):
            self.username = username # fill in the username after the equals sign
self.Token = arrstr[1]
brtn = True
return brtn
    # recognize the captcha image
def PostPic(self, filepath, codetype):
"""
        imbyte: the image bytes
        imgtype: the type; 1 is the generic type, see http://fast.net885.com/auth/main.html for more specific types
"""
strRtn = ''
imbyte = open(filepath, 'rb').read()
filename = os.path.basename(filepath)
files = {'imgfile': (filename, imbyte)}
r = requests.post(
'http://api.95man.com:8888/api/Http/Recog?Taken=' + self.Token + '&imgtype=' + str(codetype) + '&len=0',
files=files, headers=self.headers)
arrstr = r.text.split('|')
        # response format: recognition ID|recognition result|user balance
if (int(arrstr[0]) > 0):
strRtn = arrstr[1]
return strRtn
def qiandao(hbnd, hbndt): # main sign-in function
global wdnmd
s = requests.Session()
Ks95man = KSClient()
code = None
web = "http://211.68.191.30/epidemic/index?token=%s" % (hbnd)
r = s.get(web)
r = s.get("http://211.68.191.30/epidemic/captcha")
with open("1.png", "wb+") as f:
f.write(r.content)
f.close()
    if Ks95man.GetTaken('username', 'password'): # fill in as (username, password) in that order
code = Ks95man.PostPic('1.png', 1)
sign_content = hbndt
dat = {"data": sign_content, "captcha": code}
r = s.post("http://211.68.191.30/epidemic/student/sign", data=dat)
text = json.loads(r.text)
print(text)
try:
nmd = text["code"]
except:
rnm = text["status"]
if (rnm) == 500:
cnm = 0
while (cnm) < 10:
print(cnm)
chongshi(hbnd, hbndt)
if (wdnmd) == 1:
jiange()
qiandao(hbnd, hbndt)
else:
cnm = cnm + 1
time.sleep(5)
else:
print("签到失败")
            tongzhi("签到失败") # call the notification function and send the text in the parentheses
else:
print(nmd)
if (nmd) == -1:
time.sleep(1)
qiandao(hbnd, hbndt)
else:
jiange()
qiandao(hbnd, hbndt)
def chongshi(hbnd, hbndt): # retry-on-failure function
global wdnmd
s = requests.Session()
Ks95man = KSClient()
code = None
web = "http://2192.168.3.11/epidemic/index?token=%s" % (hbnd)
r = s.get(web)
r = s.get("http://211.68.191.30/epidemic/captcha")
with open("1.png", "wb+") as f:
f.write(r.content)
f.close()
if Ks95man.GetTaken('zwxym', 'zhao0427'):
code = Ks95man.PostPic('1.png', 1)
sign_content = hbndt
dat = {"data": sign_content, "captcha": code}
r = s.post("http://172.16.31.10/epidemic/student/sign", data=dat)
text = json.loads(r.text)
try:
nmd = text["code"]
except:
wdnmd = 0
else:
wdnmd = 1
def tongzhi(text): # notification function
    key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" # search for "ServerChan" (server酱), log in with a WeChat scan to get a key, and paste it here
    url = "https://sctapi.ftqq.com/%s.send" % (key)
    title = u"通知" # notification title, can be customized
    content = text # to change the notification content, edit what is passed to tongzhi() inside qiandao() (around line 130)
data = {"text": title, "desp": content}
requests.post(url, data)
def main():
global nmd
global code
global rnm
global cnm
global wdnmd
global hbnd
global hbndt
    global xxx # this variable name can be changed
    xxx = "" # fill in your token here
    # How to obtain it:
    # Your sign-in token. To get it: open the "易办大厅" (Yiban service hall), tap the three dots at the top right, tap "copy link", send the link to your computer and open it in Chrome.
    # Right-click the page, click "Inspect", open the "Network" tab in the panel on the right, then click the "疫情防控签到" (epidemic-prevention sign-in) entry on the page.
    # Some URLs will appear on the left; the first one contains the token. Copy that link and keep only the part after "token=".
    # Paste that content between the double quotes above to finish the token configuration.
    # !!! NOTE !!!
    # The token is not permanent - it is reset from time to time, and this program cannot yet follow the reset automatically.
    # So when you receive a ServerChan notification, first check whether the token has changed.
    # To do so, log in to the corresponding account and see whether entering the service hall asks you to re-authorize; if it does, repeat the token steps above.
    # If you have some penetration-testing or packet-capture experience, please send a capture of the authorization flow to <EMAIL> to help the developer obtain the verify_request of the Yiban third-party site.
    # If you do not, please click "copy link" at the top right of the authorization page and send that link to <EMAIL> for the same purpose.
    # Thank you all for your support!
xxxt = '''{
"realName":"你的名字————例:”大猛子“",
"collegeName":"你的学院全称————例:”城乡建设学院“",
"className":"你的专业全程和班级如————例:”土木工程1701“",
"studentId":"你的学号————例”没有例“",
"answer":
"{
\\"q1\\":\\"是\\",
\\"q2\\":\\"是\\",
\\"q3\\":\\"是\\",
\\"q4\\":\\"是\\",
\\"q4_1\\":\\"\\",
\\"q5\\":\\"是\\",
\\"q6\\":\\"是\\",
\\"q6_1\\":\\"\\",
\\"position\\":\\"你的地址\\ "
}"
}'''
    # !!! NOTE !!!
    # The four fields above must be filled in with the person's actual information - always write it between the " ", deleting the example text.
    # The content of "answer" stays valid as long as the sign-in page does not change; pay attention to the address at the end - delete "你的地址" and write your own address after the ".
    # If the sign-in form changes, use the same approach as for the token: copy the service-hall URL, open the epidemic-prevention sign-in page, press the sign-in button, then right-click and inspect.
    # Check whether any of the "q" (question) fields in the page source have changed, and add or modify the entries in "answer" accordingly, following the existing code.
    # Usually the only change is that a question was added; insert it just above the position line, following the pattern above.
    # The developer promises this open-source code contains no "backdoor" that collects students' personal information.
    # -------------------------------------------------------------------------------------------------------------------
    # !!! NOTE !!!
    # This program uses a "highly advanced" multi-threaded approach and can run several tasks in parallel; add more people as follows.
    # Put the second person's token between the "" of "yyy" below:
# global yyy
# yyy = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
    # Put the second person's information and address into "yyyt" below (the address goes last); the third person follows the same pattern. Remember to declare the global variable first - global xxx.
# global yyyt
# yyyt = '''{"realName":"xx","collegeName":"xxxxxx","className":"xxxxxxxxx","studentId":"xxxxxxxxxxxxx","answer":"{\\"q1\\":\\"是\\",\\"q2\\":\\"是\\",\\"q3\\":\\"是\\",\\"q4\\":\\"是\\",\\"q4_1\\":\\"\\",\\"q5\\":\\"是\\",\\"q6\\":\\"是\\",\\"q6_1\\":\\"\\",\\"position\\":\\"xxxxxxxxxx\\"}"}'''
# global zzz
# zzz = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# global zzzt
# zzzt = '''{"realName":"xx","collegeName":"xxxxxxx","className":"xxxxxxxxx","studentId":"xxxxxxxxxxxxx","answer":"{\\"q1\\":\\"是\\",\\"q2\\":\\"是\\",\\"q3\\":\\"是\\",\\"q4\\":\\"是\\",\\"q4_1\\":\\"\\",\\"q5\\":\\"是\\",\\"q6\\":\\"是\\",\\"q6_1\\":\\"\\",\\"position\\":\\"xxxxxxxxx\\"}"}'''
# ......
    t1 = Thread(target=qiandao, args=(xxx, xxxt,)) # define thread t1: call qiandao() with arguments xxx and xxxt to sign in the first person
    # t2 = Thread(target=qiandao, args=(yyy,yyyt,)) # define thread t2: call qiandao() to sign in the second person
    # t3 = Thread(target=qiandao, args=(zzz,zzzt,)) # define thread t3: call qiandao() to sign in the third person
    # ......
    t1.start() # start thread t1; start the thread of whichever person you want to sign in
    # t2.start()
    # t3.start()
    # ......
if __name__ == "__main__": # entry point
main()
``` |
{
"source": "18641315209/ovirt-engine",
"score": 3
} |
#### File: ovirt-ova-extract/files/extract_ova.py
```python
import io
import mmap
import os
import sys
from contextlib import closing
NUL = b"\0"
BUF_SIZE = 8 * 1024**2
TAR_BLOCK_SIZE = 512
def extract_disk(ova_file, disk_size, image_path):
fd = os.open(image_path, os.O_RDWR | os.O_DIRECT)
buf = mmap.mmap(-1, BUF_SIZE)
with closing(buf), io.FileIO(fd, "r+", closefd=True) as image:
copied = 0
while copied < disk_size:
read = ova_file.readinto(buf)
remaining = disk_size - copied
if remaining < read:
# read too much (disk size is not aligned
# with BUF_SIZE), thus need to go back
ova_file.seek(remaining - read, 1)
read = remaining
written = 0
while written < read:
                wbuf = memoryview(buf)[written:read]
written += image.write(wbuf)
copied += written
def nts(s, encoding, errors):
"""
Convert a null-terminated bytes object to a string.
Taken from tarfile.py.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""
Convert a number field to a python number.
Taken from tarfile.py.
"""
if s[0] in (0o200, 0o377):
n = 0
for i in range(len(s) - 1):
n <<= 8
n += s[i + 1]
if s[0] == 0o377:
n = -(256 ** (len(s) - 1) - n)
else:
try:
s = nts(s, "ascii", "strict")
n = int(s.strip() or "0", 8)
except ValueError:
            print('invalid header')
raise
return n
def extract_disks(ova_path, image_paths):
fd = os.open(ova_path, os.O_RDONLY | os.O_DIRECT)
buf = mmap.mmap(-1, TAR_BLOCK_SIZE)
with io.FileIO(fd, "r", closefd=True) as ova_file, \
closing(buf):
while True:
# read next tar info
ova_file.readinto(buf)
info = buf.read(512)
# tar files end with NUL blocks
if info == NUL*512:
break
# preparation for the next iteration
buf.seek(0)
# extract the next disk to the corresponding image
name = nts(info[0:100], 'utf-8', 'surrogateescape')
size = nti(info[124:136])
if name.lower().endswith('ovf'):
jump = size
# ovf is typically not aligned to 512 bytes blocks
remainder = size % TAR_BLOCK_SIZE
if remainder:
jump += TAR_BLOCK_SIZE - remainder
ova_file.seek(jump, 1)
else:
for image_path in image_paths:
if name in image_path:
extract_disk(ova_file, size, image_path)
break
if len(sys.argv) < 3:
print ("Usage: extract_ova.py ova_path disks_paths")
sys.exit(2)
extract_disks(sys.argv[1], sys.argv[2].split('+'))
```
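The two parsers above follow the ustar header layout: the member name occupies bytes 0-99 and the size field bytes 124-135, stored in octal (or base-256 when the first byte is 0o200/0o377). A small sanity check, assuming `nts` and `nti` from this module are in scope, can use a header produced by the standard tarfile module:
```python
import io
import tarfile

payload = b"x" * 1000
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode="w") as tar:
    member = tarfile.TarInfo("disk.img")
    member.size = len(payload)
    tar.addfile(member, io.BytesIO(payload))

header = buf.getvalue()[:512]  # the first 512-byte block is the member header
assert nts(header[0:100], "utf-8", "surrogateescape") == "disk.img"
assert nti(header[124:136]) == len(payload)
```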
#### File: ovirt-ova-pack/files/pack_ova.py
```python
import io
import mmap
import os
import sys
import tarfile
import time
from contextlib import closing
TAR_BLOCK_SIZE = 512
NUL = b"\0"
BUF_SIZE = 8 * 1024**2
def create_tar_info(name, size):
info = tarfile.TarInfo(name)
info.size = size
info.mtime = time.time()
return info
def pad_to_block_size(file):
remainder = file.tell() % TAR_BLOCK_SIZE
if remainder:
padding_size = TAR_BLOCK_SIZE - remainder
file.write(NUL * padding_size)
def write_ovf(ova_path, ovf):
ovf = ovf.encode('utf-8')
print ("writing ovf: %s" % ovf)
with io.open(ova_path, "r+b") as ova_file:
tar_info = create_tar_info("vm.ovf", len(ovf))
ova_file.write(tar_info.tobuf())
ova_file.write(ovf)
pad_to_block_size(ova_file)
os.fsync(ova_file.fileno())
def write_disk(ova_path, disk_path, disk_size):
print ("writing disk: path=%s size=%d" % (disk_path, disk_size))
disk_name = os.path.basename(disk_path)
tar_info = create_tar_info(disk_name, disk_size)
with io.open(ova_path, "a+b") as ova_file:
# write tar info
ova_file.write(tar_info.tobuf())
os.fsync(ova_file.fileno())
fd = os.open(ova_path, os.O_RDWR | os.O_DIRECT | os.O_APPEND)
with io.FileIO(fd, "a+", closefd=True) as ova_file:
# write the disk content
buf = mmap.mmap(-1, BUF_SIZE)
fd = os.open(disk_path, os.O_RDONLY | os.O_DIRECT)
with closing(buf), \
io.FileIO(fd, "r", closefd=True) as image:
while True:
read = image.readinto(buf)
if read == 0:
break # done
written = 0
while written < read:
                    wbuf = memoryview(buf)[written:read]
written += ova_file.write(wbuf)
os.fsync(ova_file.fileno())
def write_disks(ova_path, disks_info):
for disk_info in disks_info:
# disk_info is of the following structure: <full path>::<size in bytes>
idx = disk_info.index('::')
disk_path = disk_info[:idx]
disk_size = int(disk_info[idx+2:])
write_disk(ova_path, disk_path, disk_size)
def write_null_blocks(ova_path):
with io.open(ova_path, "a+b") as ova_file:
ova_file.write(NUL * 2 * TAR_BLOCK_SIZE)
if len(sys.argv) < 3:
print ("Usage: pack_ova.py output_path ovf [disks_info]")
sys.exit(2)
ova_path = sys.argv[1]
ovf = sys.argv[2]
write_ovf(ova_path, ovf)
if len(sys.argv) > 3:
disks_info = sys.argv[3]
write_disks(ova_path, disks_info.split('+'))
# write two null blocks at the end of the file
write_null_blocks(ova_path)
``` |
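Because every member is padded to the 512-byte block size and the archive ends with two NUL blocks, the output is a plain tar file. A quick round-trip check of the OVF path alone (a sketch, assuming the functions above are in scope and a scratch path; the disk path is skipped because it needs O_DIRECT-aligned I/O):
```python
import tarfile

scratch = "/tmp/test.ova"      # hypothetical scratch file
open(scratch, "wb").close()    # write_ovf() opens the file with "r+b", so it must exist
write_ovf(scratch, "<Envelope/>")
write_null_blocks(scratch)

with tarfile.open(scratch) as tar:
    assert tar.extractfile("vm.ovf").read() == b"<Envelope/>"
```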
{
"source": "18645956947/TripleIE",
"score": 2
} |
#### File: 18645956947/TripleIE/cli.py
```python
import argparse
from ie import TripleIE
def parse_args():
parser = argparse.ArgumentParser('TripleIE')
parser.add_argument('--data', type=str, default='data/normalize.txt',
help='the path to the data')
parser.add_argument('--out', type=str, default='output/normalize_out.txt',
help='the path to output')
parser.add_argument('--ltp', type=str, default='ltp_data',
help='the path to LTP model')
parser.add_argument('--clean', action='store_true',
help='output the clean relation(no tips)')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
IE = TripleIE(args.data, args.out, args.ltp, args.clean)
IE.run()
```
#### File: utils/rules/population.py
```python
import re
from utils.rules.base import Base
class Population(Base):
def __init__(self, sentence):
super(Population, self).__init__(sentence)
def get_result(self):
        # time-expression rules
str = ''
m_1 = re.search(r'(大前天|前天|昨天|今天|上午|下午|晚上)(.+?)到(大前天|前天|昨天|今天|上午|下午|晚上)?(.+?)点?', self.sentence)
m_2 = re.search(r'(大前年|前年|去年|上一年|前一年|今年)(.+?)到(大前年|前年|去年|上一年|前一年|今年)?(.+?)月?', self.sentence)
m_3 = re.search(r'[有]+(.*)', self.sentence)
if m_1:
str = m_1.group()
sentence = self.sentence.replace(str, '')
elif m_2:
str = m_2.group()
sentence = self.sentence.replace(str, '')
elif m_3:
str = m_3.group()
str_rt = self.sentence[:len(self.sentence) - len(str)]
sentence = '人口' + str
return sentence, str_rt, 'population_m5'
        else:
            sentence = self.sentence
        return sentence, str, 'population'
```
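For reference, the first rule strips a recognized time span from the sentence before the remaining text is processed; a standalone check of that regex (independent of the Base class) behaves like this:
```python
import re

sentence = '昨天八点到今天九点全市人口有多少'
m = re.search(r'(大前天|前天|昨天|今天|上午|下午|晚上)(.+?)到(大前天|前天|昨天|今天|上午|下午|晚上)?(.+?)点?', sentence)
print(m.group())                        # 昨天八点到今天九点
print(sentence.replace(m.group(), ''))  # 全市人口有多少
```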
#### File: utils/rules/temperature.py
```python
import re
from utils.rules.base import Base
class Temperature(Base):
def __init__(self, sentence):
super(Temperature, self).__init__(sentence)
def get_result(self):
        # temperature rules
str = ''
m_1 = re.search(
r'(最低气温|最高气温)(.+?)(大于|小于|大于等于|小于等于|等于|不等于|不大于|不小于|超过|不足|不超过)?(.+?)(\d+)度*', self.sentence)
if m_1:
str = m_1.group()
str = str[:4]
return self.sentence, str, 'temperature'
``` |
{
"source": "18681490423/TPShop",
"score": 2
} |
#### File: TPShop/page/page_home.py
```python
from selenium.webdriver.common.by import By
from base import BaseAction
class PageHome(BaseAction):
mine_button = By.XPATH, "text,我的", "resource-id,com.tpshop.malls:id/tab_txtv"
def click_mine(self):
self.click(self.mine_button)
``` |
{
"source": "18701016443/mayi",
"score": 3
} |
#### File: mayi/models/function.py
```python
from selenium import webdriver
import os
# screenshot helper
def insert_img(driver,file_name):
base_dir = os.path.dirname(os.path.dirname(__file__))
base_dir = str(base_dir)
base_dir = base_dir.replace("\\","/")
base = base_dir.split("/test_case")[0]
file_path = base + "/report/images/" + file_name
driver.get_screenshot_as_file(file_path)
if __name__ == "__main__":
driver = webdriver.Chrome()
driver.get("http://www.baidu.com")
insert_img(driver,"baidu.png")
driver.quit()
```
#### File: mayi/models/mydef.py
```python
from random import choice
import string,random
# generate a random number
def rad_num(num1,num2):
rad_num = random.randrange(num1,num2)
rad_num = str(rad_num)
return rad_num
# generate random letters + digits
def rad_word(num):
characters = string.ascii_letters + string.digits
rad_word = "".join(choice(characters) for x in range(num))
return rad_word
# generate random letters
def rad_only_word(num):
characters = string.ascii_letters
rad_word = "".join(choice(characters) for x in range(num))
return rad_word
```
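For reference, all three helpers return strings (`rad_num` converts its random integer to a string before returning it):
```python
print(rad_num(1, 10))    # e.g. '7'  -- a string, not an int
print(rad_word(6))       # e.g. 'a3Fk9Q'
print(rad_only_word(4))  # e.g. 'QxTb'
```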
#### File: test_case/page_obj/landlord_activity_page.py
```python
from .base import Pyse
class LandlordActivity(Pyse):
'''活动设置'''
# url = "/"
#活动页面文案
def text(self):
text= self.get_text("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[1]/p")
return text
#活动好处
def active_good(self):
self.click("class=>active_good")
#活动好处弹窗关闭按钮
def img_close(self):
self.click("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[4]/div[2]/img")
#活动规则
def regular_desc(self):
self.click("class=>regular_desc")
#活动规则弹窗文案
def regular_desc_text(self):
text = self.get_text("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[3]/div[2]")
return text
#活动规则弹窗关闭按钮
def regular_desc_close(self):
self.click("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[3]/div[2]/img")
```
#### File: test_case/page_obj/landlord_alias_page.py
```python
from .base import Pyse
from models import mydef
class LandlordAliasPage(Pyse):
'''房源管理-设置别名'''
# url = "/"
#设置别名
def alias(self):
self.click("xpath=>html/body/div[14]/div[5]/div/div/div[1]/div[1]/div[2]/div[3]/p[2]/input")
#输入别名
def input_alias(self):
alias = mydef.rad_word(5)
self.clear("xpath=>html/body/div[14]/div[5]/div/div/div[1]/div[1]/div[2]/div[3]/p[1]/input")
self.type("xpath=>html/body/div[14]/div[5]/div/div/div[1]/div[1]/div[2]/div[3]/p[1]/input",alias)
```
#### File: test_case/page_obj/landlord_invalid_page.py
```python
from .base import Pyse
class LandlordInvalidPage(Pyse):
'''失效订单'''
# url = "/"
#失效订单
def invalid(self):
self.click("id=>invalid")
#订单状态
def status(self):
text = self.get_text("xpath=>/html/body/div[14]/div[5]/div/div[2]/div/div[1]/div[2]/div[5]/p[1]/span")
return text
#订单详情
def invalid_order_details(self):
self.click("xpath=>/html/body/div[14]/div[5]/div/div[2]/div/div[1]/div[2]/div[5]/p[2]/a")
#订单详情页——返回我的订单
def invalid_return_my_order(self):
self.click("xpath=>/html/body/div[14]/div/div/div[1]/div[2]/a")
#订单详情页——订单状态
def invalid_order_details_status(self):
text = self.get_text("xpath=>/html/body/div[14]/div/div/div[2]/div[1]/p/span")
return text
```
#### File: test_case/page_obj/landlord_nav_page.py
```python
from .base import Pyse
class LandlordNavPage(Pyse):
'''房东导航集'''
# url = "/"
#我是房东
def Iamlandlord(self):
self.click("xpath=>/html/body/div[1]/div/ul/li[3]/div/a")
#房东微信
def close_weiChat(self):
self.click("xpath=>/html/body/div[14]/div[2]/div[2]/img[1]")
#房源管理
def roommanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[2]/a")
#消息通知
def msgmanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[3]/a")
#结算管理
def settlements(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[4]/a")
#房东微店
def microshopmanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[5]/a")
#特色体验
def experience(self):
self.click("xpath=>//*[@id='experience']/a")
#房东微信
def landlordweixin(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[7]/a")
#房东讲堂
def forum(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[8]/a")
#管理规范
def manageStandard(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[9]/a")
#身份验证
def authentication(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[10]/a")
#个人信息
def accountmanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[11]/div/a[1]")
#收款设置
def paymentmanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[11]/div/a[2]")
#密码设置
def passwordmanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[11]/div/a[3]")
#活动设置
def activitymanager(self):
self.click("xpath=>/html/body/div[14]/div[5]/ul/li[11]/div/a[4]")
```
#### File: test_case/page_obj/landlord_offline_page.py
```python
from .base import Pyse
class LandlordOfflinePage(Pyse):
'''房源下线'''
# url = "/"
#房源下线
def room_offline(self):
self.click("xpath=>html/body/div[14]/div[5]/div/div/div[1]/div[1]/div[2]/div[5]/ul/li[4]/a")
#下线原因
def offline_reason(self,num):
self.click("xpath=>html/body/div[20]/div/div/div/div[2]/div[2]/div/label["+num+"]")
#操作下线
def offlineReasonBtn(self):
self.click("id=>offlineReasonBtn")
#下线房源弹窗——我知道了
def okOfflineSuccessBtn(self):
self.click("id=>okOfflineSuccessBtn")
#下线房源弹窗——“下线成功”文案
def offlineSuccess_text(self):
text = self.get_text("xpath=>html/body/div[21]/div/div/div/div[2]/div[1]")
return text
#关闭
def cancelOfflineReasonBtn(self):
self.click("id=>cancelOfflineReasonBtn")
#下线房源弹窗-我要下线
def offlineBtn(self):
self.click("id=>offlineBtn")
#下线房源弹窗-去价格房态
def gopriceCal(self):
self.click("id=>gopriceCal")
#下线房源弹窗文案
def offline_text(self):
text = self.get_text("xpath=>html/body/div[19]/div/div/div/div[2]/div[1]/div")
return text
```
#### File: test_case/page_obj/landlord_serach_page.py
```python
from .base import Pyse
from .landlord_nav_page import LandlordNavPage
import datetime
class LandlordSerachPage(Pyse):
'''房东订单搜索'''
# url = "/"
#入住开始时间
def beginCheckInDay(self):
#利用js移除readonly属性,两种方法都可以
# self.js("document.getElementById('beginCheckInDay').readOnly=false;")
self.js("document.getElementById('beginCheckInDay').removeAttribute('readonly');")
beginCheckInDay =datetime.datetime.now().strftime("%Y-%m-%d")
self.type("id=>beginCheckInDay",beginCheckInDay)
#入住结束时间
def endCheckInDay(self):
self.js("document.getElementById('endCheckInDay').removeAttribute('readonly');")
endCheckInDay =(datetime.datetime.now()+datetime.timedelta(days=1)).strftime("%Y-%m-%d")
self.type("id=>endCheckInDay",endCheckInDay)
#按订单号或手机号
def orderOrMoblie(self,orderOrMoblie):
self.clear("id=>orderOrMoblie")
self.type("id=>orderOrMoblie",orderOrMoblie)
#搜索
def serach(self):
self.click("xpath=>/html/body/div[14]/div[5]/div/div[1]/div[2]/input[2]")
```
#### File: test_case/page_obj/login_page.py
```python
from .base import Pyse
from time import sleep
class LoginPage(Pyse):
'''登录'''
url = "/"
#登录/注册
def loginshow(self):
self.click("xpath=>//*[@id='loginshow']")
#账号密码登录
def changeloginbyup(self):
self.click("id=>changeloginbyup")
#账号
def username(self,username):
self.type("id=>loginnamein",username)
#密码
def password(self,password):
self.type("id=>loginpassin",password)
#验证码
def imagecode1(self):
self.click("id=>imagecode1")
sleep(8)
#登录
def loginbyupdo(self):
self.click("id=>loginbyupdo")
#登录成功返回文案
def login_sucess(self):
text = self.get_text("id=>head_nickname")
return text
def login(self):
self.open()
self.loginshow()
self.changeloginbyup()
self.username(username="18701016443")
self.password(password="<PASSWORD>")
self.imagecode1()
self.loginbyupdo()
```
#### File: test_case/page_obj/mayi_QR_page.py
```python
from .base import Pyse
class MaYiQRPage(Pyse):
'''PC网页二维码'''
# url = "/"
#首页导航——APP首单立减5元——二维码
def nav_qr(self):
self.move_to_element("xpath=>/html/body/div[1]/div/ul/li[3]/div/a")
QR_url = self.get_attribute("xpath=>/html/body/div[1]/div/ul/li[3]/div/div/img","src")
return QR_url
#首页右侧保洁合作——APP首单立减5元——二维码
def right_QR(self):
self.move_to_element("xpath=>//*[@id='app-click']/a/b")
QR_url = self.get_attribute("xpath=>//*[@id='top_div']/img","src")
return QR_url
#谷歌下载页面文案打印
def text(self):
text = self.get_text("xpath=>/html/body")
return text
#首页底部二维码
def bottom_QR(self):
QR_url = self.get_attribute("css=>.mt10,.app_download_qr","src")
return QR_url
#首页底部Android下载
def Androiddown_btn(self):
self.click("css=>.app,.mt10 t-center")
#下载蚂蚁短租客户端手机注册立送5元——二维码
def bottom_left_QR(self):
QR_url = self.get_attribute("xpath=>//*[@id='floatingLayer']/div[1]/div[2]/div[1]/img","src")
return QR_url
#下载蚂蚁短租客户端手机注册立送5元——下载按钮
def bottom_left_andriodbtn(self):
self.click("xpath=>//*[@id='floatingLayer']/div[1]/div[2]/div[2]/a[2]")
#APP50下载页面的Android下载按钮
def APP50_andriodbtn(self):
self.click("xpath=>//*[@id='Android']/img")
#房东banner图上的二维码
def order_banner_QR(self):
QR_url = self.get_attribute("xpath=>//*[@id='box']/div[1]/ul/li[1]/img","src")
return QR_url
#下载页——拨打电话——二维码
def call_QR(self):
self.move_to_element("class=>call-phone")
QR_url = self.get_attribute("class=>chat_ma","src")
return QR_url
#房客订单管理右侧
def tenant_order_QR(self):
QR_url = self.get_attribute("xpath=>//*[@id='wx']/a/img","src")
return QR_url
#房东招募
def landlord_zhaomu_QR(self):
QR_url = self.get_attribute("xpath=>/html/body/div[14]/div[2]/div[2]/div[2]/div[3]/div[3]/img","src")
return QR_url
# def close_(self):
# self.click("xpath=>//*[@id='floatingLayer']/div[1]/div[2]/div[5]/img")
# 下载页——下载按钮
def down_btn(self):
self.click("xpath=>/html/body/section/div[1]/div/a")
#WAP页下载app
def wap_down_btn(self):
self.click("id=>app_download")
#WAP页底部下载
def wap_bottom_down(self):
self.click("class=>appdownload")
#联系房东
def lianxifangdong_down(self):
self.click("class=>pBox")
#房东主页下载app
def landlord_index_down(self):
self.click("xpath=>//*[@id='indexPage']/div[1]/div[3]/p[3]/a")
```
#### File: test_case/page_obj/tenant_againorder_page.py
```python
from .base import Pyse
class TenantAgainorderPage(Pyse):
    '''Tenant places an order again'''
    # place the order again
def againorder(self):
self.click("xpath=>/html/body/div[14]/div[1]/div/div[1]/div/div[4]/div[2]/div[6]/p/a")
```
#### File: test_case/page_obj/tenant_msgmanage_page.py
```python
from .base import Pyse
class TenantMsganagePage(Pyse):
    '''Tenant message notifications'''
    # reply
def return_msg(self):
self.click("xpath=>html/body/div[14]/div/div/div/div/div[1]/div[2]/ul/li[2]/span/span/a")
    # view
def look(self):
self.click("xpath=>html/body/div[14]/div/div/div/div/div[1]/div[2]/ul/li[1]/a")
```
#### File: test_case/page_obj/wujiaqu_order_page.py
```python
from .base import Pyse
from models import mydef
class WujiaquOrderPage(Pyse):
'''五家渠下订单'''
# url = "/wujiaqu/"
#关键词
def searchcityin1(self):
searchcityin1 = "张佳恒测试"
self.type("id=>searchcityin1",searchcityin1)
#搜索
def landmarkBtn(self):
self.click("id=>landmarkBtn")
#第一个房源
def one_room(self):
list = []
dds = self.get_elements("xpath=>//*[@id='searchRoom']/dd")
for i in dds:
id = i.get_attribute("lid")
list.append(id)
self.open_new_window("xpath=>//*[@id="+list[2]+"]")
#立即预定
def goBookBtn(self):
self.click("id=>goBookBtn")
#入住人数
def people(self):
people = mydef.rad_num(1,10)
self.type("xpath=>html/body/form/div[2]/div[1]/div[1]/div/div/dl[3]/dd/input[2]",people)
#入住人姓名
def tenantname(self):
tenantname = mydef.rad_word(5)
self.clear("id=>tenantname")
self.type("id=>tenantname",tenantname)
def name(self):
n = self.get_text("id=>tenantname")
return n
# def phone(self):
# ph ="15539243368"
# self.clear("id=>tenantmobile")
# self.type("id=>tenantmobile",ph)
def xiug(self):
self.click("id=>editMobile")
#保险联系人
def user(self):
self.click("xpath=>//*[@id='insuranceDiv']/div/div[1]/div[1]/div/ul/li[1]/div/div[1]/span")
#提交订单
def submit_order(self):
self.click("xpath=>html/body/form/div[2]/div[1]/div[6]/div/a")
#订单提交成功后返回的文案
def order_success(self):
text = self.get_text("xpath=>//*[@id='payForm']/div/div[1]/div[2]/p[1]")
return text
```
#### File: mayi/test_case/test_landlord_alias.py
```python
from test_case.page_obj import login_page,landlord_nav_page,landlord_alias_page
from models import function,myunit
from time import sleep
import unittest
class TestAlias(myunit.MyTest):
'''房源别名'''
def test_alias(self):
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
po = landlord_alias_page.LandlordAliasPage(self.driver)
po.alias()
po.input_alias()
function.insert_img(self.driver,"alias.png")
if __name__ == "__mian__":
unittest.main()
```
#### File: mayi/test_case/test_landlord_microshopmanager.py
```python
from test_case.page_obj import login_page,landlord_nav_page,landlord_microshopmanager_page
from models import myunit,function
from time import sleep
import unittest
class TestMicroshopManager(myunit.MyTest):
'''房东微店'''
def test_edit_weidian(self):
'''编辑微店,更改微店名称和微店介绍'''
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).microshopmanager()
po = landlord_microshopmanager_page.LandlordMicroshopManagerPage(self.driver)
po.edit_weidian_btn()
sleep(2)
po.title()
sleep(1)
po.shop_introduction()
sleep(2)
function.insert_img(self.driver,"edit_weidian.png")
po.save_edit_btn()
def test_view_again(self):
'''查看房东说明'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).microshopmanager()
po = landlord_microshopmanager_page.LandlordMicroshopManagerPage(self.driver)
po.view_again_btn()
sleep(2)
print(po.microshop_text())
function.insert_img(self.driver,"view_weidian.png")
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_landlord_offline.py
```python
from test_case.page_obj import login_page,landlord_nav_page,landlord_offline_page
from models import function,myunit,mydef
from time import sleep
import unittest
class TestOffline(myunit.MyTest):
'''房源下线'''
def test_gopriceCal(self):
'''房源下线(去价格房态)——下线原因:我想休息,暂停出租'''
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
po = landlord_offline_page.LandlordOfflinePage(self.driver)
po.room_offline()
num = 1
po.offline_reason(str(num))
sleep(2)
po.offlineReasonBtn()
sleep(1)
po.gopriceCal()
sleep(2)
function.insert_img(self.driver,"gopriceCal.png")
def test_room_offline(self):
'''房源下线——下线原因:非我想休息,暂停出租'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
po = landlord_offline_page.LandlordOfflinePage(self.driver)
po.room_offline()
num = mydef.rad_num(2,9)
po.offline_reason(num)
sleep(2)
po.offlineReasonBtn()
function.insert_img(self.driver,"offlineSuccess.png")
print(po.offlineSuccess_text())
sleep(2)
po.okOfflineSuccessBtn()
def test_offline(self):
'''房源下线——下线原因:我想休息,暂停出租'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
po = landlord_offline_page.LandlordOfflinePage(self.driver)
po.room_offline()
num = 1
po.offline_reason(str(num))
po.offlineReasonBtn()
sleep(2)
function.insert_img(self.driver,"offline.png")
po.offlineBtn()
print(po.offlineSuccess_text())
sleep(2)
assert po.offlineSuccess_text()=="下线成功!"
po.okOfflineSuccessBtn()
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_landlord_read.py
```python
from test_case.page_obj import login_page,landlord_nav_page,landlord_read_page
from models import function,myunit
from time import sleep
import unittest
class TestLandlordRead(myunit.MyTest):
'''房源管理-房东必读'''
def test_agreement(self):
'''服务协议'''
login_page.LoginPage(self.driver).login()
sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
sleep(3)
po = landlord_read_page.LandlordReadPage(self.driver)
po.read()
sleep(3)
po.agreement()
print(po.agreement_text())
sleep(2)
function.insert_img(self.driver, "agreement.png")
def test_landlord_read(self):
'''房东规则'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
sleep(2)
po = landlord_read_page.LandlordReadPage(self.driver)
po.read()
sleep(2)
function.insert_img(self.driver,"landlord_read.png")
def test_tenant_rule(self):
'''房客规则'''
# login_page.LoginPage(self.driver).login()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(3)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
sleep(3)
po = landlord_read_page.LandlordReadPage(self.driver)
po.read()
sleep(3)
po.tenant_rule()
function.insert_img(self.driver,"tenant_rule.png")
def test_roomauditrule(self):
'''房间审核规范'''
# login_page.LoginPage(self.driver).login()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(3)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
sleep(3)
po = landlord_read_page.LandlordReadPage(self.driver)
po.read()
sleep(3)
po.roomauditrule()
function.insert_img(self.driver, "roomauditrule.png")
def test_privacypolicy(self):
'''隐私条款'''
# login_page.LoginPage(self.driver).login()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(3)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(3)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
sleep(3)
po = landlord_read_page.LandlordReadPage(self.driver)
po.read()
sleep(3)
po.privacypolicy()
print(po.privacypolicy_text())
function.insert_img(self.driver, "privacypolicy.png")
def test_disclaimer(self):
'''免责声明'''
# login_page.LoginPage(self.driver).login()
# sleep(3)
landlord_nav_page.LandlordNavPage( self.driver ).Iamlandlord()
sleep( 3 )
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
# sleep(3)
landlord_nav_page.LandlordNavPage( self.driver ).roommanager()
sleep( 3 )
po = landlord_read_page.LandlordReadPage( self.driver )
po.read()
sleep( 3 )
po.disclaimer()
print( po.disclaimer_text() )
function.insert_img( self.driver, "disclaimer.png" )
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_landlord_refund.py
```python
from test_case.page_obj import login_page,landlord_nav_page,landlord_refund_page
from models import myunit,function
from time import sleep
import unittest
class TestRefund(myunit.MyTest):
'''退款订单'''
def test_order_status(self):
'''进入订单详情页并打印状态'''
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
po = landlord_refund_page.LandlordRefundPage(self.driver)
po.refund()
po.refund_order_defails()
print(po.refund_order_details_status())
function.insert_img(self.driver,"order_details_status.png")
def test_return_myorder(self):
'''返回我的订单并打印第一个订单的状态'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
po = landlord_refund_page.LandlordRefundPage(self.driver)
po.refund()
po.refund_order_defails()
po.refund_return_my_order()
print(po.status())
function.insert_img(self.driver,"refund_order.png")
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_landlord_waitingcheckin.py
```python
from models import myunit,function
from test_case.page_obj import login_page,landlord_nav_page,landlord_waitingcheckin_page
from time import sleep
import unittest
class TestWaitingcheckin(myunit.MyTest):
'''待入住订单'''
def test_return_myorder(self):
'''从订单详情页返回到我的订单列表'''
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
po = landlord_waitingcheckin_page.LandlordWaitingcheckinPage(self.driver)
po.waitingcheckin()
po.wait_order_details()
po.wait_return_my_order()
print(po.status())
assert po.status()=="待入住"
function.insert_img(self.driver,"myorder.png")
def test_status(self):
'''进入订单详情页的并打印状态'''
# login_page.LoginPage(self.driver).login()
# sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
# landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
po = landlord_waitingcheckin_page.LandlordWaitingcheckinPage(self.driver)
po.waitingcheckin()
po.wait_order_details()
print(po.wait_order_details_status())
assert po.wait_order_details_status()=="待入住"
function.insert_img(self.driver,"wait_order_status.png")
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_online_editdata_fydes.py
```python
from test_case.page_obj import login_page,landlord_nav_page,fabu_room_page,landlord_online_editdata
from models import myunit
from time import sleep
import unittest
class TestEditdataFydes(myunit.MyTest):
'''修改房源'''
def test_edit_fy_des(self):
'''修改房源描述'''
login_page.LoginPage(self.driver).login()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).Iamlandlord()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).close_weiChat()
sleep(2)
landlord_nav_page.LandlordNavPage(self.driver).roommanager()
fb = fabu_room_page.FabuRoomPage(self.driver)
po = landlord_online_editdata.LandlordOnlineEditdata(self.driver)
po.editdata()
po.fy_des()
po.is_element_exist("xpath=>html/body/div[14]/div[2]/div/div/div[2]/form/div[1]/p/em")
sleep(2)
#之前的逻辑是修改地理位置,后来逻辑改为不可修改地理位置,因此注释以下代码
# po.EditAddress()
# sleep(2)
# po.changePosition()
sleep(1)
fb.title()
fb.intro()
sleep(2)
fb.landmark()
fb.traffic()
sleep(2)
fb.surroundings()
fb.userule()
sleep(2)
fb.otherintro()
sleep(2)
po.fydes_save()
print(po.editsuccess_text())
po.auditLodgeConfirmBtn()
if __name__ == "__main__":
unittest.main()
```
#### File: mayi/test_case/test_tenant_againorder.py
```python
from test_case.page_obj import login_page,tenant_nav_page,tenant_againorder_page,wujiaqu_order_page
from models import function,myunit
from time import sleep
import unittest
class TestAgainOrder(myunit.MyTest):
'''房客重新下单'''
@unittest.skip("跳过重新下单的用例")
def test_againorder(self):
'''重新下单'''
login_page.LoginPage(self.driver).login()
sleep(2)
tenant_nav_page.TenantNavPage(self.driver).Iamtenant()
po = tenant_againorder_page.TenantAgainorderPage(self.driver)
po.againorder()
po._open(url="/room/851272901")
sleep(2)
wo = wujiaqu_order_page.WujiaquOrderPage(self.driver)
wo.goBookBtn()
sleep(2)
wo.people()
# wo.tenantname()
sleep(2)
wo.user()
wo.submit_order()
sleep(2)
print(wo.order_success())
assert wo.order_success()=="订单提交成功"
function.insert_img(self.driver, "againorder_success.png")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "1871vinayak/Calculator",
"score": 4
} |
#### File: 1871vinayak/Calculator/calc.py
```python
def addition(x, y):
    return x + y

def subtraction(x, y):
    return x - y

def multiplication(x, y):
    return x * y

def division(x, y):
    return x / y

def raisePower(x, y):
    return x ** y

print("Operation to perform:")
print("1. Addition")
print("2. Subtraction")
print("3. Multiplication")
print("4. Division")
print("5. Raising a power to number")
choice = int(input("Enter choice: "))
num1 = int(input("Enter first number: "))
num2 = int(input("Enter second number: "))
if choice == 1:
    print(num1, "+", num2, "=", addition(num1, num2))
elif choice == 2:
    print(num1, "-", num2, "=", subtraction(num1, num2))
elif choice == 3:
    print(num1, "*", num2, "=", multiplication(num1, num2))
elif choice == 4:
    print(num1, "/", num2, "=", division(num1, num2))
elif choice == 5:
    print(num1, "**", num2, "=", raisePower(num1, num2))
else:
    print("Please select a valid input.")
``` |
{
"source": "18721017183/text_renderer-master",
"score": 3
} |
#### File: 18721017183/text_renderer-master/help_runner.py
```python
import os
import subprocess
# See parse_args for supported arguments
configs = [
dict(tag='test1',
num_img=10,
img_width=150),
dict(tag='test2',
num_img=20,
debug=False,
corpus_mode='list',
corpus_dir='./data/list_corpus')
]
def dict_to_args(config: dict):
args = []
for k, v in config.items():
if v is False:
continue
args.append('--%s' % k)
args.append('%s' % v)
return args
if __name__ == "__main__":
main_func = './main.py'
for config in configs:
args = dict_to_args(config)
print("Run with args: %s" % args)
subprocess.run(['python3', main_func] + args)
``` |
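For reference, `dict_to_args` flattens each config into a flag/value list, and entries whose value is `False` (such as `debug=False` above) are dropped entirely rather than passed as flags; the first config expands to:
```python
>>> dict_to_args(dict(tag='test1', num_img=10, img_width=150))
['--tag', 'test1', '--num_img', '10', '--img_width', '150']
```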
{
"source": "18724760121/baike_triples",
"score": 3
} |
#### File: baike_triples/spider/spider_main.py
```python
import url_manager, html_downloader, html_parser
import pickle
import os
import sys
import threading
import time
import urllib
class MyThread(threading.Thread):
def __init__(self,name):
threading.Thread.__init__(self)
self._running = True
self.name=name
# print(self.name)
def terminate(self):
self._running = False
def run(self):
# try:
pages=0
spendtime=0.
while urls.has_new_url() and self._running:
try:
start=time.time()
LOCK.acquire()
new_url = urls.get_new_url()
LOCK.release()
html_cont = downloader.download(new_url)
new_urls, _ = parser.parse(html_cont)
LOCK.acquire()
urls.add_new_urls(new_urls)
LOCK.release()
pages+=1
spendtime+=time.time()-start
cost=spendtime/pages
print(f"Thread:{self.name} id:{len(urls.old_urls)} URL:{urllib.parse.unquote(new_url).replace('https://baike.baidu.com/item/','')} {str(cost)[:4]}:sec/page")
except KeyboardInterrupt:
print('save state',sys.exc_info())
pickle.dump(urls, open('urls.bin', 'wb'))
except:
continue
if __name__=='__main__':
PATH='urls.pkl'
root_url = 'https://baike.baidu.com'
LOCK=threading.Lock()
urls = url_manager.UrlManager()
downloader = html_downloader.HtmlDownloader()
parser = html_parser.HtmlParser()
threads=[]
count_thread=12
if os.path.exists(PATH):
urls=pickle.load(open(PATH,'rb'))
else:
urls.add_new_url(root_url)
length=len(urls.new_urls)
print(f'build urls,length={length}')
for i in range(count_thread):
print(f'build thread {i}...')
threads.append(MyThread(str(i)))
try:
        for t in threads:
            t.start()
        for t in threads:
            t.join()
except:
for t in threads:
t.terminate()
print('error!', sys.exc_info()[0])
finally:
print('finished,saving state')
pickle.dump(urls, open(PATH, 'wb'))
``` |
{
"source": "18730298725/tea-util",
"score": 2
} |
#### File: python/alibabacloud_tea_util/client.py
```python
import json
import uuid
import platform
import socket
import time
import Tea
import asyncio
from datetime import datetime
from urllib.parse import urlencode
from io import BytesIO
from Tea.model import TeaModel
from Tea.stream import READABLE
from typing import Any, BinaryIO, Dict, List
class Client:
"""
This is a utility module
"""
class __ModelEncoder(json.JSONEncoder):
def default(self, o: Any) -> Any:
if isinstance(o, TeaModel):
return o.to_map()
elif isinstance(o, bytes):
return o.decode('utf-8')
super().default(o)
@staticmethod
def __read_part(f, size=1024):
while True:
part = f.read(size)
if part:
yield part
else:
return
@staticmethod
def __get_default_agent():
return f'AlibabaCloud ({platform.system()}; {platform.machine()}) ' \
f'Python/{platform.python_version()} Core/{Tea.__version__} TeaDSL/1'
@staticmethod
def to_bytes(
val: str,
) -> bytes:
"""
Convert a string(utf8) to bytes
@return: the return bytes
"""
return val.encode(encoding="utf-8")
@staticmethod
def to_string(
val: bytes,
) -> str:
"""
Convert a bytes to string(utf8)
@return: the return string
"""
return val.decode('utf-8')
@staticmethod
def parse_json(
val: str,
) -> Any:
"""
Parse it by JSON format
@return: the parsed result
"""
try:
return json.loads(val)
except ValueError:
raise RuntimeError(f'Failed to parse the value as json format, Value: "{val}".')
@staticmethod
async def read_as_bytes_async(stream) -> bytes:
"""
Read data from a readable stream, and compose it to a bytes
@param stream: the readable stream
@return: the bytes result
"""
if isinstance(stream, bytes):
return stream
elif isinstance(stream, str):
return bytes(stream, encoding='utf-8')
else:
return await stream.read()
@staticmethod
async def read_as_string_async(stream) -> str:
"""
Read data from a readable stream, and compose it to a string
@param stream: the readable stream
@return: the string result
"""
buff = await Client.read_as_bytes_async(stream)
return Client.to_string(buff)
@staticmethod
async def read_as_json_async(stream) -> Any:
"""
Read data from a readable stream, and parse it by JSON format
@param stream: the readable stream
@return: the parsed result
"""
return Client.parse_json(
await Client.read_as_string_async(stream)
)
@staticmethod
def read_as_bytes(stream) -> bytes:
"""
Read data from a readable stream, and compose it to a bytes
@param stream: the readable stream
@return: the bytes result
"""
if isinstance(stream, READABLE):
b = b''
for part in Client.__read_part(stream, 1024):
b += part
return b
elif isinstance(stream, bytes):
return stream
else:
return bytes(stream, encoding='utf-8')
@staticmethod
def read_as_string(stream) -> str:
"""
Read data from a readable stream, and compose it to a string
@param stream: the readable stream
@return: the string result
"""
buff = Client.read_as_bytes(stream)
return Client.to_string(buff)
@staticmethod
def read_as_json(stream) -> Any:
"""
Read data from a readable stream, and parse it by JSON format
@param stream: the readable stream
@return: the parsed result
"""
return Client.parse_json(Client.read_as_string(stream))
@staticmethod
def get_nonce() -> str:
"""
Generate a nonce string
@return: the nonce string
"""
name = socket.gethostname() + str(uuid.uuid1())
namespace = uuid.NAMESPACE_URL
return str(uuid.uuid5(namespace, name))
@staticmethod
def get_date_utcstring() -> str:
"""
        Get a UTC format string from the current date, e.g. 'Thu, 06 Feb 2020 07:32:54 GMT'
@return: the UTC format string
"""
return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
@staticmethod
def default_string(
real: str,
default: str,
) -> str:
"""
If not set the real, use default value
@return: the return string
"""
return real if real is not None else default
@staticmethod
def default_number(
real: int,
default: int,
) -> int:
"""
If not set the real, use default value
@return: the return number
"""
return real if real is not None else default
@staticmethod
def to_form_string(
val: dict,
) -> str:
"""
Format a map to form string, like a=a%20b%20c
@return: the form string
"""
if not val:
return ""
keys = sorted(list(val))
dic = {k: val[k] for k in keys if not isinstance(val[k], READABLE)}
return urlencode(dic)
@staticmethod
def to_jsonstring(
val: Any,
) -> str:
"""
Stringify a value by JSON format
@return: the JSON format string
"""
return json.dumps(val, cls=Client.__ModelEncoder)
@staticmethod
def empty(
val: str,
) -> bool:
"""
Check the string is empty?
@return: if string is null or zero length, return true
"""
return not val
@staticmethod
def equal_string(
val1: str,
val2: str,
) -> bool:
"""
Check one string equals another one?
@return: if equals, return true
"""
return val1 == val2
@staticmethod
def equal_number(
val1: int,
val2: int,
) -> bool:
"""
Check one number equals another one?
@return: if equals, return true
"""
return val1 == val2
@staticmethod
def is_unset(
value: Any,
) -> bool:
"""
Check one value is unset
@return: if unset, return true
"""
return value is None
@staticmethod
def stringify_map_value(
m: Dict[str, Any],
) -> Dict[str, str]:
"""
Stringify the value of map
@return: the new stringified map
"""
if m is None:
return {}
dic_result = {}
for k, v in m.items():
if v is not None:
if isinstance(v, bytes):
v = v.decode('utf-8')
else:
v = str(v)
dic_result[k] = v
return dic_result
@staticmethod
def anyify_map_value(
m: Dict[str, str],
) -> Dict[str, Any]:
"""
Anyify the value of map
@return: the new anyfied map
"""
return m
@staticmethod
def assert_as_boolean(
value: Any,
) -> bool:
"""
Assert a value, if it is a boolean, return it, otherwise throws
@return: the boolean value
"""
if not isinstance(value, bool):
raise ValueError(f'{value} is not a bool')
return value
@staticmethod
def assert_as_string(
value: Any,
) -> str:
"""
Assert a value, if it is a string, return it, otherwise throws
@return: the string value
"""
if not isinstance(value, str):
raise ValueError(f'{value} is not a str')
return value
@staticmethod
def assert_as_bytes(
value: Any,
) -> bytes:
"""
Assert a value, if it is a bytes, return it, otherwise throws
@return: the bytes value
"""
if not isinstance(value, bytes):
raise ValueError(f'{value} is not a bytes')
return value
@staticmethod
def assert_as_number(
value: Any,
) -> int:
"""
Assert a value, if it is a number, return it, otherwise throws
@return: the number value
"""
if not isinstance(value, (int, float)):
raise ValueError(f'{value} is not a number')
return value
@staticmethod
def assert_as_map(
value: Any,
) -> Dict[str, Any]:
"""
Assert a value, if it is a map, return it, otherwise throws
@return: the map value
"""
if not isinstance(value, dict):
raise ValueError(f'{value} is not a dict')
return value
@staticmethod
def get_user_agent(
user_agent: str,
) -> str:
"""
        Get the user agent; if userAgent is not null, splice it with the default user agent and return that, otherwise return the default user agent
@return: the string value
"""
if user_agent:
return f'{Client.__get_default_agent()} {user_agent}'
return Client.__get_default_agent()
@staticmethod
def is_2xx(
code: int,
) -> bool:
"""
If the code between 200 and 300, return true, or return false
@return: boolean
"""
return 200 <= code < 300
@staticmethod
def is_3xx(
code: int,
) -> bool:
"""
If the code between 300 and 400, return true, or return false
@return: boolean
"""
return 300 <= code < 400
@staticmethod
def is_4xx(
code: int,
) -> bool:
"""
If the code between 400 and 500, return true, or return false
@return: boolean
"""
return 400 <= code < 500
@staticmethod
def is_5xx(
code: int,
) -> bool:
"""
If the code between 500 and 600, return true, or return false
@return: boolean
"""
return 500 <= code < 600
@staticmethod
def validate_model(
m: TeaModel,
) -> None:
"""
Validate model
@return: void
"""
if isinstance(m, TeaModel):
m.validate()
@staticmethod
def to_map(
in_: TeaModel,
) -> Dict[str, Any]:
"""
Model transforms to map[string]any
@return: map[string]any
"""
if isinstance(in_, TeaModel):
return in_.to_map()
else:
return in_
@staticmethod
def sleep(
millisecond: int,
) -> None:
"""
Suspends the current thread for the specified number of milliseconds.
"""
time.sleep(millisecond / 1000)
@staticmethod
async def sleep_async(
millisecond: int,
) -> None:
"""
Suspends the current thread for the specified number of milliseconds.
"""
await asyncio.sleep(millisecond / 1000)
@staticmethod
def to_array(
input: Any,
) -> List[Dict[str, Any]]:
"""
Transform input as array.
"""
if input is None:
return []
out = []
for i in input:
if isinstance(i, TeaModel):
out.append(i.to_map())
else:
out.append(i)
return out
@staticmethod
def assert_as_readable(
value: Any,
) -> BinaryIO:
"""
Assert a value, if it is a readable, return it, otherwise throws
@return: the readable value
"""
if isinstance(value, str):
value = value.encode('utf-8')
if isinstance(value, bytes):
value = BytesIO(value)
elif not isinstance(value, READABLE):
raise ValueError(f'The value is not a readable')
return value
@staticmethod
def assert_as_array(
value: Any,
) -> list:
if not isinstance(value, list):
raise ValueError('The value is not a list')
return value
``` |
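A few of these helpers in action (an illustrative sketch, not taken from the upstream test suite):
```python
from alibabacloud_tea_util.client import Client

print(Client.to_form_string({'b': 'a b c', 'a': 1}))                  # a=1&b=a+b+c
print(Client.stringify_map_value({'n': 1, 'ok': True, 'raw': b'x'}))  # {'n': '1', 'ok': 'True', 'raw': 'x'}
print(Client.is_2xx(200), Client.is_4xx(404))                         # True True
```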
{
"source": "18756/ITMO_FS",
"score": 3
} |
#### File: filters/sparse/MLSFS.py
```python
import numpy as np
from sklearn.neighbors import NearestNeighbors
class MLSFS(object):
"""
Performs the Semi-supervised sparse feature selection algorithm based on multi-view
Laplacian regularization.
Parameters
----------
p : int
Number of features to select.
    labled : int
        Number of leading samples that are labeled.
    ds : list of int
        List with the number of features in each view.
    neighbors : int
        Number of nearest neighbors to use while building the graph.
mu : float, optional
Parameter in the objective function.
lam : float, optional
Regularization parameter in the objective function.
gamma : float, optional
Parameter in the objective function that keeps view weights positive.
Gamma should be greater than one.
max_iterations : int, optional
Maximum amount of iterations to perform.
epsilon : positive float, optional
Specifies the needed residual between the target functions from consecutive iterations. If the residual
is smaller than epsilon, the algorithm is considered to have converged.
See Also
--------
https://www.sciencedirect.com/science/article/abs/pii/S0262885615000748
Examples
--------
"""
def __init__(self, p, labled, ds, neighbors, mu=1, lam=1, gamma=1.1, max_iterations=1000, epsilon=1e-5):
self.p = p
self.labled = labled
self.ds = ds
self.mu = mu
self.lam = lam
if gamma <= 1:
raise ValueError("Gamma should be greater than one, %d passed" % gamma)
self.gamma = gamma
self.neighbors = neighbors
self.max_iterations = max_iterations
if epsilon < 0:
raise ValueError("Epsilon should be positive, %d passed" % epsilon)
self.epsilon = epsilon
def run(self, X, y=None):
"""
Fits filter
Parameters
----------
X : numpy array, shape (n_samples, n_features)
The training input samples.
y : numpy array, shape (n_samples) or (n_samples, n_classes), optional
            Target values (one or zero) for the first labeled samples; the entries for the remaining samples are zeros
Returns
----------
G : array-like, shape (n_features, c)
Projection matrix.
See Also
--------
Examples
--------
>>> from ITMO_FS.filters.sparse.MLSFS import MLSFS
>>> import numpy as np
>>> X = np.array([[1, 2, 3, 3, 1],[2, 2, 3, 3, 2], [1, 3, 3, 1, 3],\
[3, 1, 3, 1, 4],[4, 4, 3, 1, 5]], dtype = np.integer)
>>> y = np.array([1, 0, 1, 0, 0], dtype=np.integer)
>>> model = MLSFS(p=3, labled=3, ds=[2, 3], neighbors=3)
>>> weights = model.run(X)
>>> model.feature_ranking(weights)
"""
n_samples, n_features = X.shape
n_views = len(self.ds)
if len(y.shape) == 1:
y = np.array([[y_el] for y_el in y])
n_classes = y.shape[1]
view_matrices = []
cur_index = 0
for i in range(n_views):
matrix = []
for x in X:
matrix.append([x[j] for j in range(cur_index, cur_index + self.ds[i])])
cur_index += self.ds[i]
view_matrices.append(np.array(matrix).T)
X = X.T
S = [NearestNeighbors(n_neighbors=self.neighbors).fit(view.T).kneighbors_graph(view.T).toarray()
for view in view_matrices]
D = self.neighbors * np.eye(n_samples)
Lv = [D - s for s in S]
U = np.zeros((n_samples, n_samples))
for i in range(n_samples):
if i < self.labled:
U[i, i] = 1e99
else:
U[i, i] = 1
t = 0
alphas = np.array([1 / n_views for i in range(n_views)])
G = np.random.rand(n_features, n_classes)
for iteration in range(self.max_iterations):
L = np.sum(np.array([alphas[i] ** self.gamma * Lv[i] for i in range(n_views)]), 0)
P = np.linalg.inv(L + U + self.mu * np.eye(n_samples))
Q = U.dot(y) + self.mu * X.T.dot(G)
F = P.dot(Q)
A = X.dot(self.mu * np.eye(n_samples) - self.mu ** 2 * P.T).dot(X.T)
B = self.mu * X.dot(P).dot(U).dot(y)
W = np.zeros((n_features, n_features))
for i in range(n_features):
W[i, i] = (G[i].dot(G[i]) ** 1.5) / 4
newG = np.linalg.inv(A + 4 * self.lam * W).dot(B)
sum_for_alphas = sum([pow((1 / np.trace(F.T.dot(Lv[i]).dot(F))), (1 / (self.gamma - 1)))
for i in range(n_views)])
alphas = [pow((1 / np.trace(F.T.dot(Lv[i]).dot(F))), (1 / (self.gamma - 1))) / sum_for_alphas
for i in range(n_views)]
diff = np.sum(np.abs(G - newG))
print("diff ", diff)
if (diff < self.epsilon):
break
G = newG
return G
def feature_ranking(self, G):
"""
Choose p features.
Parameters
----------
G : array-like, shape (n_features, c)
Feature weight matrix.
The number of features to select, p, is taken from self.p.
Returns
-------
indices : array-like, shape(p)
Indices of p selected features.
"""
features_weights = np.sum(np.abs(G), 1)
features_weights = [(i, features_weights[i]) for i in range(len(features_weights))]
features_weights = sorted(features_weights, key=lambda el: el[1])
return list(map(lambda el: el[0], features_weights[-self.p:]))
```
#### File: filters/univariate/UnivariateFilter.py
```python
from numpy import ndarray
from sklearn.base import TransformerMixin, BaseEstimator
from .measures import GLOB_CR, GLOB_MEASURE
from ...utils import DataChecker, generate_features, check_restrictions
class UnivariateFilter(BaseEstimator, TransformerMixin, DataChecker): # TODO ADD LOGGING
"""
Basic interface for using univariate measures for feature selection.
List of available measures is in ITMO_FS.filters.univariate.measures, also you can
provide your own measure but it should suit the argument scheme for measures,
i.e. take two arguments x,y and return scores for all the features in dataset x.
Same applies to cutting rules.
Parameters
----------
measure : string or callable
A metric name defined in GLOB_MEASURE or a callable with signature
measure (sample dataset, labels of dataset samples)
which should return a list of metric values for each feature in the dataset.
cutting_rule : string or callable
A cutting rule name defined in GLOB_CR or a callable with signature
cutting_rule (features),
which should return a list of features selected by the rule.
See Also
--------
Examples
--------
>>> from sklearn.datasets import make_classification
>>> from ITMO_FS.filters.univariate import select_k_best
>>> from ITMO_FS.filters.univariate import UnivariateFilter
>>> from ITMO_FS.filters.univariate import f_ratio_measure
>>> x, y = make_classification(1000, 100, n_informative = 10, n_redundant = 30, \
n_repeated = 10, shuffle = False)
>>> ufilter = UnivariateFilter(f_ratio_measure, select_k_best(10))
>>> ufilter.fit(x, y)
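A custom measure can also be passed as a callable; a minimal sketch
(the variance-based lambda below is only illustrative, not a recommended measure):
>>> custom_measure = lambda x, y: x.var(axis=0)
>>> ufilter = UnivariateFilter(custom_measure, select_k_best(10))
>>> ufilter.fit(x, y)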
"""
def __init__(self, measure, cutting_rule=("Best by percentage", 0.2)):
# TODO Check measure and cutting_rule
super().__init__()
if type(measure) is str:
try:
self.measure = GLOB_MEASURE[measure]
except KeyError:
raise KeyError("No %r measure yet" % measure)
elif hasattr(measure, '__call__'):
self.measure = measure
else:
raise KeyError("%r isn't a measure function or string" % measure)
# Store the cutting rule; it is resolved to a callable later in __apply_cr()
self.cutting_rule = cutting_rule
def __apply_cr(self):
if type(self.cutting_rule) is tuple:
cutting_rule_name = self.cutting_rule[0]
cutting_rule_value = self.cutting_rule[1]
try:
self.cutting_rule = GLOB_CR[cutting_rule_name](cutting_rule_value)
except KeyError:
raise KeyError("No %r cutting rule yet" % cutting_rule_name)
elif hasattr(self.cutting_rule, '__call__'):
self.cutting_rule = self.cutting_rule
else:
raise KeyError("%r isn't a cutting rule function or string" % self.cutting_rule)
check_restrictions(self.measure.__name__, self.cutting_rule.__name__)
def __apply_ms(self):
if type(self.measure) is str:
try:
self.measure = GLOB_MEASURE[self.measure]
except KeyError:
raise KeyError("No %r measure yet" % self.measure)
elif hasattr(self.measure, '__call__'):
self.measure = self.measure
else:
raise KeyError("%r isn't a measure function or string" % self.measure)
def get_scores(self, X, y, feature_names):
"""
Counts feature scores on given data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples, )
The target values.
feature_names : list of strings
In case you want to define feature names
Returns
------
dict, where keys are feature names and values are feature scores
"""
self.__apply_ms()
return dict(zip(feature_names, self.measure(X, y)))
def fit_transform(self, X, y=None, feature_names=None, store_scores=False, **fit_params):
"""
Fits the filter and transforms given dataset X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples, ), optional
The target values.
feature_names : list of strings, optional
In case you want to define feature names
store_scores : boolean, optional (by default False)
In case you want to store the scores of features
for future calls to Univariate filter
**fit_params :
dictionary of measure parameters, if needed.
Returns
------
X dataset sliced with features selected by the filter
"""
self.fit(X, y, feature_names, store_scores)
return self.transform(X)
def fit(self, X, y, feature_names=None, store_scores=True):
"""
Fits the filter.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples, )
The target values.
feature_names : list of strings, optional
In case you want to define feature names
store_scores : boolean, optional (by default True)
In case you want to store the scores of features
for future calls to Univariate filter
Returns
------
None
"""
self.__apply_ms()
self.__apply_cr()
X, y, feature_names = self._check_input(X, y, feature_names)
features = generate_features(X, feature_names)
self.feature_names = dict(zip(features, feature_names))
feature_scores = self.get_scores(X, y, features)
if store_scores:
self.feature_scores = feature_scores
self.selected_features = self.cutting_rule(feature_scores)
def transform(self, X):
"""
Slices given dataset by previously selected features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
Returns
------
X dataset sliced with features selected by the filter
"""
if type(X) is ndarray:
return X[:, self.selected_features]
else:
return X[self.selected_features]
def __repr__(self, **kwargs):
return "Univariate filter with measure {} and cutting rule {}".format(self.measure.__name__,
self.cutting_rule.__name__)
```
#### File: ITMO_FS/utils/information_theory.py
```python
from collections import Counter
from itertools import groupby
from math import log, fsum
from operator import itemgetter
import numpy as np
def conditional_entropy(x_j, y):
# H(Y|X)
buf = [[e[1] for e in g] for _, g in groupby(sorted(zip(x_j, y)), itemgetter(0))]
return fsum(entropy(group) * len(group) for group in buf) / len(x_j)
def matrix_mutual_information(x, y):
return np.apply_along_axis(mutual_information, 0, x, y)
def mutual_information(x, y):
return entropy(y) - conditional_entropy(x, y)
def conditional_mutual_information(x, y, z):
return entropy(list(zip(x, z))) + entropy(list(zip(y, z))) - entropy(list(zip(x, y, z))) - entropy(z)
def joint_mutual_information(x, y, z):
return mutual_information(x, z) + conditional_mutual_information(y, z, x)
def interaction_information(x, y, z):
return mutual_information(x, z) + mutual_information(y, z) - joint_mutual_information(x, y, z)
def elog(x):
# x * log(x), with the convention that values outside (0, 1) contribute 0
return x * log(x) if 0. < x < 1. else 0.
def entropy(x):
# H(X) from raw counts: log(N) - (1/N) * sum(c * log(c)) == -sum((c/N) * log(c/N))
return log(len(x)) - fsum(v * log(v) for v in Counter(x).values()) / len(x)
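# A tiny illustrative check (the values below are assumed, not part of the module):
# with x = [0, 0, 1, 1] and y = [0, 1, 0, 1], entropy(y) == log(2) and
# conditional_entropy(x, y) == log(2), so mutual_information(x, y) == 0;
# with y = x, conditional_entropy(x, y) == 0 and mutual_information(x, y) == log(2).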
```
#### File: wrappers/deterministic/AddDelWrapper.py
```python
import random as rnd
from copy import copy
from importlib import reload
import numpy as np
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
class AddDelWrapper(object):
"""
Creates add-del feature wrapper
Parameters
----------
estimator: object
A supervised learning estimator with a fit method
score : callable
A callable function which will be used to estimate score
maximize : boolean
True if bigger values of the score function are better
seed: int
Seed for python random
best_score : float
The best score of given metric on the feature combination after add-del procedure
See Also
--------
Lecture about feature selection (ru), p.13 - http://www.ccas.ru/voron/download/Modeling.pdf
Examples
--------
>>> from sklearn.metrics import accuracy_score
>>> from sklearn import datasets,linear_model
>>> data = datasets.make_classification(n_samples=1000, n_features=20)
>>> X = np.array(data[0])
>>> y = np.array(data[1])
>>> lg = linear_model.LogisticRegression(solver='lbfgs')
>>> add_del = AddDelWrapper(lg, accuracy_score)
>>> add_del.fit(X, y)
>>> from sklearn.metrics import mean_absolute_error
>>> boston = datasets.load_boston()
>>> X = boston['data']
>>> y = boston['target']
>>> lasso = linear_model.Lasso()
>>> add_del = AddDelWrapper(lasso, mean_absolute_error, maximize=False)
>>> add_del.fit(X, y)
"""
def __init__(self, estimator, score, maximize=True, seed=42):
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should be an estimator implementing "
"'fit' method, %r was passed" % estimator)
self._estimator = estimator
self.score = score
self.maximize = maximize
rnd.seed(seed)
self.best_score = 0.0
def __add(self, X, y, cv=3, silent=True):
prev_score = 0
scores = []
to_append = [i for i in range(X.shape[1])] # list of features not used in final configuration
appended = [] # list of features in final configuration
for feature in to_append:
appended.append(feature)
current_score = abs(np.mean(cross_val_score(self._estimator, X[:, appended], y,
scoring=make_scorer(self.score,
greater_is_better=self.maximize),
cv=cv)))
scores.append(current_score)
if not silent:
print('feature {} (score: {})'.format(feature, current_score))
if self.maximize and current_score <= prev_score:
appended.pop()
elif not self.maximize and current_score > prev_score:
appended.pop()
prev_score = current_score
if not silent:
if self.maximize:
print('max score: {}'.format(np.max(scores)))
elif not self.maximize:
print('min score: {}'.format(np.min(scores)))
return appended
def __del(self, X, y, features, cv=3, silent=True):
prev_score = abs(np.mean(cross_val_score(self._estimator, X[:, features], y,
scoring=make_scorer(self.score, greater_is_better=self.maximize),
cv=cv)))
current_score = 0
scores = [prev_score]
res_score = 0
if not silent:
print('score: {}'.format(prev_score))
iter_features = copy(features)
for feature in iter_features:
features.remove(feature)
current_score = abs(np.mean(cross_val_score(self._estimator, X[:, features], y,
scoring=make_scorer(self.score,
greater_is_better=self.maximize),
cv=cv)))
scores.append(current_score)
if not silent:
print('remove feature {} (score: {})'.format(feature, current_score))
if self.maximize and prev_score > current_score:
features.append(feature)
if not self.maximize and prev_score <= current_score:
features.append(feature)
if self.maximize and current_score > prev_score:
prev_score = current_score
if not self.maximize and current_score <= prev_score:
prev_score = current_score
if self.maximize:
res_score = np.max(scores)
elif not self.maximize:
res_score = np.min(scores)
if not silent:
print('score: {}'.format(res_score))
return features, res_score
def fit(self, X, y, cv=3, silent=True): ##TODO with fit predict
"""
Fits wrapper.
Parameters
----------
X : numpy array or pandas DataFrame, shape (n_samples, n_features)
The training input samples.
y : numpy array of pandas Series, shape (n_samples, )
The target values.
cv : int, optional (by default 3)
Number of splits in cross-validation
silent : boolean, optional (by default True)
If silent=False then prints all the scores during the add-del procedure
Returns
-------
None
The selected features are stored in self.__features__ and the estimator is refitted on them.
"""
return_feature_names = False
try:
import pandas
if isinstance(X, pandas.DataFrame):
return_feature_names = True
columns = np.array(X.columns)
else:
pandas = reload(pandas)
except ImportError:
pass
X = np.array(X)
y = np.array(y).ravel()
if not silent:
print('add trial')
features = self.__add(X, y, cv, silent)
if not silent:
print('del trial')
features, score = self.__del(X, y, features, cv, silent)
self.best_score = score
if return_feature_names:
features = list(columns[features])
self.__features__ = features
self._estimator.fit(X[:, features], y)
def predict(self, X):
return self._estimator.predict(X)
```
#### File: ITMO_FS/test/ensemble_test.py
```python
import time
import unittest
import numpy as np
from collections import defaultdict
from sklearn.datasets import make_classification, make_regression
from sklearn.metrics import f1_score
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from ITMO_FS.ensembles.measure_based import *
from ITMO_FS.ensembles.ranking_based import *
from ITMO_FS.filters.univariate import *
class MyTestCase(unittest.TestCase):
wide_classification = make_classification(n_features=2000, n_informative=100, n_redundant=500)
tall_classification = make_classification(n_samples=50000, n_features=100, n_informative=23, n_redundant=30)
wide_regression = make_regression(n_features=2000, n_informative=100)
tall_regression = make_regression(n_samples=50000, n_features=200, n_informative=50)
def test_ranking_based_ensemble(self):
data, target = self.wide_classification[0], self.wide_classification[1]
filters = [gini_index,
fechner_corr,
spearman_corr,
pearson_corr]
ensemble = Mixed(filters)
ensemble.fit(data, target)
ensemble.transform(data, 100, borda_fusion)
d = [{'f' + str(i): i for i in range(100)}.items()] * 5
self.assertEqual(borda_fusion(d, 100), ['f' + str(i) for i in reversed(range(100))])
ensemble.transform(data, 100)
self.assertEqual(borda_fusion(d, 100), ['f' + str(i) for i in reversed(range(100))])
def test_weight_based_ensemble(self):
data, target = self.wide_classification[0], self.wide_classification[1]
filters = [UnivariateFilter(gini_index),
UnivariateFilter(fechner_corr),
UnivariateFilter(spearman_corr),
UnivariateFilter(pearson_corr)]
ensemble = WeightBased(filters)
ensemble.fit(data, target)
weights = [0.5, 0.5, 0.5, 0.5]
ensemble.transform(data, select_k_best(100), weights=weights)
def test_benching_ensembles(self):
datasets = [make_classification(n_samples=2000, n_features=20 * i, n_informative=i, n_redundant=5 * i) for i in
[2, 10, 20, 50, 100, 200, 500, 1000]]
filters = [gini_index,
fechner_corr,
spearman_corr,
pearson_corr]
kfold = KFold(n_splits=10)
for dataset in datasets:
X, y = dataset
k = int(X.shape[1] * 0.1)
time_ens_start = []
time_ens_end = []
time_filter_start = defaultdict(list)
time_filter_end = defaultdict(list)
scores_ens = []
scores_filters = defaultdict(list)
scores_no_fs = []
for train_index, test_index in kfold.split(X):
svm = SVC()
svm.fit(X[train_index], y[train_index])
y_pred = svm.predict(X[test_index])
scores_no_fs.append(f1_score(y[test_index], y_pred))
time_ens_start.append(time.time())
ensemble = Mixed(filters)
ensemble.fit(X[train_index], y[train_index])
X_transformed = ensemble.transform(X, k, borda_fusion)
time_ens_end.append(time.time())
svm = SVC()
svm.fit(X_transformed[train_index], y[train_index])
y_pred = svm.predict(X_transformed[test_index])
scores_ens.append(f1_score(y[test_index], y_pred))
for filter in filters:
time_filter_start[filter.__name__].append(time.time())
univ_filter = UnivariateFilter(filter, cutting_rule=("K best", k))
univ_filter.fit(X[train_index], y[train_index])
X_transformed = univ_filter.transform(X)
time_filter_end[filter.__name__].append(time.time())
svm = SVC()
svm.fit(X_transformed[train_index], y[train_index])
y_pred = svm.predict(X_transformed[test_index])
scores_filters[filter.__name__].append(f1_score(y[test_index], y_pred))
print('Dataset size', X.shape)
sum_time = 0
for filter in filters:
filter_dif = np.array(time_filter_end[filter.__name__]) - np.array(time_filter_start[filter.__name__])
print('Filter ' + filter.__name__ + ' time', np.mean(filter_dif), np.std(filter_dif))
sum_time += np.mean(filter_dif)
ens_dif = np.array(time_ens_end) - np.array(time_ens_start)
print('Ensemble time', np.mean(ens_dif), np.std(ens_dif))
print('Sum of filter time', sum_time)
print('No fs score', np.mean(scores_no_fs), np.std(scores_no_fs))
for filter in filters:
print('Filter ' + filter.__name__ + ' score', np.mean(scores_filters[filter.__name__]),
np.std(scores_filters[filter.__name__]))
print('Ensemble score', np.mean(scores_ens), np.std(scores_ens))
print()
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "18813684097/new_di",
"score": 2
} |
#### File: site-packages/noseselenium/cases.py
```python
from nose.plugins.skip import SkipTest
class SeleniumSkipper(object):
"""
A class that raises a SkipTest exception when an attribute is
accessed.
"""
def __getattr__(self, name):
raise SkipTest("SeleniumPlugin not enabled.")
class SeleniumTestCaseMixin(object):
"""
Provides a selenium attribute that raises :class:`SkipTest`
when not overwritten by :class:`SeleniumPlugin`.
"""
# To be replaced by the plugin.
selenium = SeleniumSkipper()
# Triggers the plugin if enabled.
selenium_test = True
start_live_server = True
```
#### File: site-packages/pyselenium/test_steps.py
```python
from pyselenium.test_metadata import ElementFinder
from pyselenium.test_metadata import Step
class Click(ElementFinder, Step):
"""A test step that simulates a click on an element"""
def __init__(self, css_path, hint):
super().__init__(css_path, hint)
def run(self, driver):
step_result = StepResult(self)
try:
driver.click(self.css_path, self.hint)
except Exception as exception:
step_result.exception = exception
return step_result
class Navigate(Step):
"""A test step that navigates to a given URL"""
def __init__(self, url):
super().__init__()
self.url = url
def run(self, driver):
step_result = StepResult(self)
try:
driver.navigate(self.url)
except Exception as exception:
step_result.exception = exception
return step_result
class AssertElementValue(ElementFinder, Step):
"""A test step that asserts the value (text) inside an element"""
def __init__(self, css_path, hint, expected_value):
super().__init__(css_path, hint)
self.expected_value = expected_value
def run(self, driver):
step_result = StepResult(self)
element_value = None
try:
element_value = driver.get_element_value(self.css_path, self.hint)
except Exception as exception:
step_result.exception = exception
else:
if element_value != self.expected_value:
step_result.exception = ElementValueIncorrectError(self.css_path, self.hint, element_value,
self.expected_value)
return step_result
class AssertElementAttributeValue(ElementFinder, Step):
"""A test step that compares a given value to a given attribute value of the web element"""
def __init__(self, css_path, hint, attribute_name, expected_value):
super().__init__(css_path, hint)
self.attribute_name = attribute_name
self.expected_value = expected_value
def run(self, driver):
step_result = StepResult(self)
attribute_value = None
try:
attribute_value = driver.get_element_attribute(self.css_path, self.hint, self.attribute_name)
except Exception as exception:
step_result.exception = exception
else:
if attribute_value != self.expected_value:
step_result.exception = ElementAttributeValueIncorrectError(self.css_path, self.hint,
self.attribute_name,
attribute_value, self.expected_value)
return step_result
class ClickIfFound(ElementFinder, Step):
"""A test step that clicks an element if it is found but doesn't fail if it's not found"""
def __init__(self, css_path, hint, wait_time):
super().__init__(css_path, hint)
self.wait_time = wait_time
def run(self, driver):
step_result = StepResult(self)
try:
driver.click_if_found(self.css_path, self.hint, self.wait_time)
except Exception as exception:
step_result.exception = exception
return step_result
class AssertElementNotPresent(ElementFinder, Step):
"""Asserts that an element at a given CSS path is not present on the web page
after trying to find it for a given time"""
def __init__(self, css_path, hint, wait_time):
super().__init__(css_path, hint)
self.wait_time = wait_time
def run(self, driver):
step_result = StepResult(self)
try:
element_found = driver.can_find_element(self.css_path, self.wait_time)
except Exception as exception:
step_result.exception = exception
else:
if element_found:
step_result.exception = ElementShouldNotBePresentError(self.css_path, self.hint, self.wait_time)
return step_result
class TypeText(ElementFinder, Step):
"""Selects an element and simulates the user typing the specified text in the element"""
def __init__(self, css_path, hint, text):
super().__init__(css_path, hint)
self.text = text
def run(self, driver):
step_result = StepResult(self)
try:
driver.send_text(self.css_path, self.hint, self.text)
except Exception as exception:
step_result.exception = exception
return step_result
class SendEnter(Step):
"""Sends the enter key to simulate the user hitting the return button on the keyboard"""
def __init__(self):
super().__init__()
def run(self, driver):
step_result = StepResult(self)
try:
driver.send_enter_key()
except Exception as exception:
step_result.exception = exception
return step_result
class SelectDropDownItemByText(ElementFinder, Step):
"""Selects an item inside a dropdown control by its text"""
def __init__(self, css_path, hint, item_text):
super().__init__(css_path, hint)
self.item_text = item_text
def run(self, driver):
step_result = StepResult(self)
try:
driver.select_drop_down_item_by_text(self.css_path, self.hint, self.item_text)
except Exception as exception:
step_result.exception = exception
return step_result
class SetCheckbox(ElementFinder, Step):
"""Checks or unchecks a checkbox web element"""
def __init__(self, css_path, hint, checked):
super().__init__(css_path, hint)
self.checked = checked
def run(self, driver):
step_result = StepResult(self)
try:
driver.set_checkbox(self.css_path, self.hint, self.checked)
except Exception as exception:
step_result.exception = exception
return step_result
class SwitchFrame(ElementFinder, Step):
""""Switches the context of the web driver to the iFrame found at the specified CSS path"""
def __init__(self, css_path, hint):
super().__init__(css_path, hint)
def run(self, driver):
step_result = StepResult(self)
try:
driver.switch_to_frame(self.css_path, self.hint)
except Exception as exception:
step_result.exception = exception
return step_result
class SwitchToDefaultContent(Step):
"""Switches the context of the web driver back to the default content of the web page"""
def run(self, driver):
step_result = StepResult(self)
try:
driver.switch_to_default_content()
except Exception as exception:
step_result.exception = exception
return step_result
class StepResult:
"""Represents the result of the execution of a test step.
Attributes:
success -- True if the step execution was successful, false otherwise
exception -- An exception that might have occurred during step execution. None if no exception occurred
step -- The step that was executed
"""
def __init__(self, step):
self.success = True
self.step = step
self._exception = None
@property
def exception(self):
""""Gets an exception if one occurred during execution of the step. Returns None otherwise."""
return self._exception
@exception.setter
def exception(self, value):
""""Sets the exception that occurred during execution of the step."""
self._exception = value
self.success = False
class StepExecutionError(Exception):
""""Represents failures in test steps execution"""
pass
class ElementAttributeValueIncorrectError(StepExecutionError):
"""An exception thrown when the value of an element's given attribute is different from the expected value"""
def __init__(self, css_path, hint, attribute_name, actual_value, expected_value):
super().__init__()
self.css_path = css_path
self.hint = hint
self.attribute_name = attribute_name
self.actual_value = actual_value
self.expected_value = expected_value
class ElementValueIncorrectError(StepExecutionError):
""""An exception thrown when the value of an element is different from the expected value"""
def __init__(self, css_path, hint, actual_value, expected_value):
super().__init__()
self.css_path = css_path
self.hint = hint
self.actual_value = actual_value
self.expected_value = expected_value
class ElementShouldNotBePresentError(StepExecutionError):
"""An exception thrown when an element that should not be present on the web page is found
Attributes:
- wait_time: A value, in seconds, that Selenium will keep looking for the element before considering that it is
not present
"""
def __init__(self, css_path, hint, wait_time):
super().__init__()
self.css_path = css_path
self.hint = hint
self.wait_time = wait_time
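# A minimal usage sketch (the `driver` object is assumed to expose the navigate/click/...
# methods used by the steps above; the selectors below are illustrative only):
# steps = [Navigate('https://example.com'), Click('#login-button', 'login button')]
# results = [step.run(driver) for step in steps]
# failures = [r for r in results if not r.success]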
``` |
{
"source": "18819F-Project/ModellingDynamicalSystem",
"score": 3
} |
#### File: ModellingDynamicalSystem/dynamics/lagrangian.py
```python
import torch
from torch.autograd.functional import jacobian, hessian
def solve_euler_lagrange(lagrange_fcn, state):
'''
:param lagrange_fcn: any generic lagrange calculating function for a
dynamical system. Input must only be state, and
output must only be a single scalar representing
the calculated energy of the system
:param state: state vector of the system
:return: state_dot: calculated derivative of the input state, which can
be used to integrate and simulate system dynamics
'''
n = state.shape[0] // 2 # Get number of generalized coords
xv = torch.autograd.Variable(state, requires_grad=True)
xv = torch.flatten(xv)
y, yt = torch.split(xv, n, dim=0)  # first n entries are coordinates, last n are velocities
# The hessian/jacobian are calculated w.r.t all variables in xv
# so select only relevant parts using slices
A = hessian(lagrange_fcn, xv, create_graph=True)[n:, n:]  # d2L/dqdot2 (generalized mass matrix)
B = torch.squeeze(jacobian(lagrange_fcn, xv, create_graph=True))[:n]  # dL/dq
C = hessian(lagrange_fcn, xv, create_graph=True)[n:, :n]  # d2L/(dqdot dq)
ytt = torch.linalg.solve(A, (B - C @ yt).T)  # qddot = A^-1 (dL/dq - C qdot)
state_dot = torch.cat([yt, torch.squeeze(ytt)])
return state_dot
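# A minimal usage sketch (assumption: two unit masses on unit springs, so the state
# is [q1, q2, q1_dot, q2_dot]; this Lagrangian is illustrative and not part of the module).
def _example_spring_lagrangian(xv):
q, q_dot = xv[:2], xv[2:]
return 0.5 * (q_dot ** 2).sum() - 0.5 * (q ** 2).sum()
if __name__ == '__main__':
example_state = torch.tensor([0.1, -0.2, 0.0, 0.0])
print(solve_euler_lagrange(_example_spring_lagrangian, example_state))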
``` |
{
"source": "18827547638/scrapy",
"score": 3
} |
#### File: coolscrapy/spiders/article_spider.py
```python
from coolscrapy.utils import parse_text
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from coolscrapy.items import Article
class ArticleSpider(CrawlSpider):
name = "article"
def __init__(self, rule):
self.rule = rule
self.name = rule.name
self.allowed_domains = rule.allow_domains.split(",")
self.start_urls = rule.start_urls.split(",")
rule_list = []
# Add the rule for following the "next page" link
if rule.next_page:
rule_list.append(Rule(LinkExtractor(restrict_xpaths=rule.next_page)))
# Add the rule for extracting article links
rule_list.append(Rule(LinkExtractor(
allow=[rule.allow_url],
restrict_xpaths=[rule.extract_from]),
callback='parse_item'))
self.rules = tuple(rule_list)
super(ArticleSpider, self).__init__()
def parse_item(self, response):
self.log('Hi, this is an article page! %s' % response.url)
article = Article()
article["url"] = response.url
title = response.xpath(self.rule.title_xpath).extract()
article["title"] = parse_text(title, self.rule.name, 'title')
body = response.xpath(self.rule.body_xpath).extract()
article["body"] = parse_text(body, self.rule.name, 'body')
publish_time = response.xpath(self.rule.publish_time_xpath).extract()
article["publish_time"] = parse_text(publish_time, self.rule.name, 'publish_time')
article["source_site"] = self.rule.source_site
return article
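# A minimal sketch of the `rule` object this spider expects; the attribute names are
# taken from their usage above, the values are illustrative only:
# from types import SimpleNamespace
# rule = SimpleNamespace(name='news', allow_domains='example.com',
# start_urls='http://example.com/news/', next_page='//a[@class="next"]',
# allow_url=r'/article/\d+', extract_from='//div[@class="list"]',
# title_xpath='//h1/text()', body_xpath='//div[@id="content"]//text()',
# publish_time_xpath='//span[@class="time"]/text()', source_site='Example News')
# spider = ArticleSpider(rule)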
``` |
{
"source": "18839782321/lyf.github.io",
"score": 2
} |
#### File: shixiseng/spiders/sxs_search_jobs.py
```python
import base64
import os
import re
import scrapy
from fontTools.ttLib import TTFont
from lxml import etree
class SxsSearchJobsSpider(scrapy.Spider):
name = 'sxs_search_jobs'
allowed_domains = ['shixiseng.com']
start_urls = ['inn_oaa5eydglz7z', 'inn_bulgyhycojpc', 'inn_pfenyj08bcmw', 'inn_zatlrnw9hmpx', 'inn_4l9om3okqp6b',
'inn_ezfp1kviqbw0', 'inn_4lndsaqt4mhi', 'inn_cxmaalq5ggjm', 'inn_dcwk1hhkamat', 'inn_e69yvlosufbx',
'inn_fcnkynrtddmb', 'inn_g5euq2s321uc', 'inn_jwinkxwrbjrx', 'inn_jwinkxwrbjrx', 'inn_kxws5hnei0re',
'inn_m1xazpyammzf', 'inn_n97yutog5wdq', 'inn_ojqebb13cope', 'inn_p1dker7toat5', 'inn_phol4rfufz8i',
'inn_qistj5t6leav']
def start_requests(self):
for u in self.start_urls:
url = f'https://www.shixiseng.com/intern/{u}?pcm=pc_Company'
yield scrapy.Request(
url=url,
callback=self.parse,
dont_filter=True,
meta={'handle_httpstatus_list': [302]}
)
def parse(self, response):
if response.status in (302,):
yield scrapy.Request(
url=response.url,
callback=self.parse,
dont_filter=True
)
else:
jobid = str(response.url).split('intern/')[1].split('?pcm')[0]
path1 = os.path.abspath(__file__)
path2 = os.path.dirname(path1)
try:
font_face = str(re.findall(re.compile(r'base64,(.*?)}', re.S), response.text)[0]).strip('")')
except Exception:
font_face = ''
if font_face == '':
self.logger.warning(f'页面找不到了:{response.url}')
else:
b = base64.b64decode(font_face)
with open(f'{path2}\\{jobid}.woff', 'wb') as f:
f.write(b)
f.close()
font = TTFont(f'{path2}\\{jobid}.woff')
os.remove(f'{path2}\\{jobid}.woff')
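# The site obfuscates characters with a custom web font; read the font's cmap table to
# map the obfuscated code points back to real characters before parsing the page.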
ccmap = font['cmap'].getBestCmap()
newmap = {}
for key, value in ccmap.items():
# Convert the key to hexadecimal
key = hex(key)
value = value.replace('uni', '')
a = 'u' + '0' * (4 - len(value)) + value
newmap[key] = a
# Drop the first element, which is not needed
newmap.pop('0x78')
# Add the "u" prefix and evaluate it as a unicode escape
for i, j in newmap.items():
newmap[i] = eval("u" + "\'\\" + j + "\'")
new_dict = {}
# Rewrite the keys to match how the characters appear in the page source (&#x... entities)
for key, value in newmap.items():
key_ = key.replace('0x', '&#x')
new_dict[key_] = value
yield scrapy.Request(
url=response.url,
callback=self.parse_detail,
dont_filter=True,
meta={'new_dict': new_dict, 'jobid': jobid, 'handle_httpstatus_list': [302]}
)
def parse_detail(self, response):
if response.status in (302,):
yield scrapy.Request(
url=response.url,
callback=self.parse,
dont_filter=True
)
else:
new_dict = response.meta.get('new_dict')
jobid = response.meta.get('jobid')
html = response.text
for key, value in new_dict.items():
html = html.replace(key, value)
res = etree.HTML(html)
item = {}
# Job title
item['poname'] = res.xpath('//div[@class="new_job_name"]/@title')[0]
# City
item['city'] = res.xpath('//span[@class="job_position"]/@title')[0]
# Salary
item['salary'] = res.xpath('//span[@class="job_money cutom_font"]/text()')[0]
# Education requirement
item['edu'] = res.xpath('//span[@class="job_academic"]/text()')[0]
# Work address
try:
item['address'] = res.xpath('//span[@class="com_position"]/text()')[0]
except Exception:
item['address'] = ''
# Working days per week
item['job_week'] = res.xpath('//span[@class="job_week cutom_font"]/text()')[0]
# Internship duration
try:
item['job_time'] = res.xpath('//span[@class="job_time cutom_font"]/text()')[0]
except Exception:
item['job_time'] = ''
# Job detail URL
item['job_url'] = response.url
item['jobid'] = jobid
# Job description
item['jd'] = '\n'.join(res.xpath('//div[@class="job_part"]//text()'))
# Job benefits
item['welfare'] = '|'.join(res.xpath('//div[@class="job_good_list"]/span//text()'))
# Job update time
item['update_time'] = res.xpath('//div[@class="job_date "]/span[1]/text()')[0]
# Application deadline
item['end_time'] = re.findall(re.compile(r'截止日期:(.*?)</div>', re.S), html)[0]
# Resume requirements
item['resume_requirement'] = re.findall(re.compile(r'简历要求:(.*?)</div>', re.S), html)[0]
# Company name
try:
item['coname'] = str(res.xpath('//a[@class="com-name "]/text()')[0]).strip()
except Exception:
item['coname'] = str(
res.xpath('//a[@class="com-name com-name--with-label"]/text()')[0]).strip()
try:
href = res.xpath('//a[@class="com-name "]/@href')[0]
except Exception:
href = res.xpath('//a[@class="com-name com-name--with-label"]/@href')[0]
# https://www.shixiseng.com/com/com_apjk2grlcpdj?pcm=pc_SearchList
# Unique company id
item['coid'] = str(href).split('/com/')[1].split('?')[0]
# Company type (nature of the enterprise)
try:
item['conature'] = str(
re.findall(re.compile(r'iconqiyexingzhi.*?</i>(.*?)</div>', re.S), html)[0]).strip()
except Exception:
item['conature'] = ''
item['flag'] = 'sxs_search_jobs'
print(item)
if __name__ == '__main__':
from scrapy.cmdline import execute
execute(['scrapy', 'crawl', 'sxs_search_jobs'])
``` |
{
"source": "18840518110/meiduo_project",
"score": 2
} |
#### File: meiduo_mall/apps/testlog.py
```python
from meiduo_mall.settings.dev import logger
def log():
logger.debug('测试logging模块debug')
logger.info('测试logging模块info')
logger.error('测试logging模块error')
``` |
{
"source": "18845635368/multicard",
"score": 2
} |
#### File: multicard/config/config.py
```python
import ml_collections
def config_test():
config = ml_collections.ConfigDict()
# !Random seed settings
config.seed = 1
# !Hardware settings
# Number of GPUs to use
config.gpus = 2
config.world_size = 2
# Distributed backend to use
config.backend = 'nccl'
#
config.init_method = 'tcp://10.249.178.201:34567'
# !Transform settings
config.face = 'scale'
config.size = 224
# !Training settings
config.batch = 64
config.epochs = 30
config.syncbn = True
# Backbone used for the spatial branch
config.net = 'EfficientNetB4'
# Train/validation split specification
config.traindb = ["ff-c23-720-140-140"]
config.valdb = ["ff-c23-720-140-140"]
# !Dataset settings
# Path to the DataFrame (pickle) describing the cropped face images
config.ffpp_faces_df_path = '/mnt/8T/FFPP/df/output/FFPP_df.pkl'
# Directory where the cropped face images are stored
config.ffpp_faces_dir = '/mnt/8T/FFPP/faces/output'
# How often to run validation, in batches
config.valint = 100
config.valsamples = 6000
# How often to write logs, in batches
config.logint = 100
# How often to save a model checkpoint, in batches
config.modelperiod = 500
# !Optimizer settings
config.lr = 1e-3
config.patience = 10
# !Model loading settings
config.scratch = False
config.models_dir = '/mnt/8T/multicard/weights/binclass/'
# 0 loads the best model, 1 loads the latest model, 2 loads the model specified by index
config.mode = 0
config.index = 0
config.workers = 4
# !logpath
config.log_dir = '/mnt/8T/multicard/runs/binclass/'
# !Currently unused settings
config.debug = False
config.dfdc_faces_df_path = ''
config.dfdc_faces_dir = ''
return config
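# A minimal usage sketch (illustrative only):
# config = config_test()
# print(config.net, config.batch, config.lr)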
``` |
{
"source": "18853477039/social",
"score": 2
} |
#### File: Calculator /app/__init__.py
```python
from flask import Flask
from flask_restful import Api
from app import views
from app.apis import Calculator
def create_app():
app = Flask(__name__)
app.debug = True
api = Api(app)
api.add_resource(Calculator, '/cal_num')
app.register_blueprint(views.bp)
return app
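# A minimal usage sketch (assuming this package is importable as `app`; illustrative only):
# from app import create_app
# create_app().run()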
```
#### File: Calculator /app/views.py
```python
from flask import Blueprint, render_template, request
bp = Blueprint('blue', __name__)
@bp.route('/')
def hello_world():
return 'Hello World!'
@bp.route('/cla/', methods=['GET', 'POST'])
def cla():
if request.method == 'POST':
# Form values would be read here, e.g.:
# fir_num = request.form.get('fir_num')
# sec_num = request.form.get('sec_num')
pass
# Render the page for both GET and POST so that GET requests do not return None
return render_template('calculator.html')
``` |
{
"source": "18892021125/NWU-ACM-MIS-backend",
"score": 2
} |
#### File: NWU-ACM-MIS-backend/contest/models.py
```python
from django.db import models
from member.models import Member
class Contest(models.Model):
"""竞赛信息"""
class Level(models.TextChoices):
STATE = 'ST', '国级'
PROVINCE = 'PR', '省级'
CAMPUS = 'CP', '校级'
class Type(models.TextChoices):
ICPC = 'IC', 'ICPC'
CCPC = 'CC', 'CCPC'
CCCC = '4C', 'CCCC'
OTHERS = 'OT', 'Other'
name = models.CharField('竞赛名', max_length=30)
date = models.DateField('日期')
typ = models.CharField('类型', max_length=2, choices=Type.choices)
level = models.CharField('分级', max_length=2, choices=Level.choices)
members = models.ManyToManyField(
Member, 'contests',
blank=True,
verbose_name='参加队员'
)
class Meta:
verbose_name = verbose_name_plural = '竞赛'
def __str__(self):
return f"{self.name}"
class Reward(models.Model):
contest = models.ForeignKey(Contest, models.PROTECT, verbose_name='竞赛')
class RewardType(models.TextChoices):
GOLD = 'G', '金奖'
SILVER = 'S', '银奖'
BRONZE = 'B', '铜奖'
GRAND = '0', '特等奖'
FIRST = '1', '一等奖'
SECOND = '2', '二等奖'
THIRD = '3', '三等奖'
reward_type = models.CharField('奖牌类型', max_length=2, choices=RewardType.choices)
certificate = models.FileField('证书', upload_to='certificates', null=True, blank=True)
class Meta:
abstract = True
def __str__(self):
return f"{self.contest}"
class PersonalReward(Reward):
member = models.ForeignKey(Member, models.PROTECT, verbose_name='队员')
class Meta:
verbose_name = verbose_name_plural = '个人奖项'
class TeamReward(Reward):
team_name = models.CharField('队名', max_length=64)
members = models.ManyToManyField(Member, verbose_name='队员')
class Meta:
verbose_name = verbose_name_plural = '团队奖项'
class CollegeReward(Reward):
members = models.ManyToManyField(Member, verbose_name='队员')
class Meta:
verbose_name = verbose_name_plural = '学校奖项'
```
#### File: NWU-ACM-MIS-backend/member/admin.py
```python
from django.contrib import admin
from member.models import (
Team, Member, Contribution, Training, Tag, Achievement
)
class MemberTagInline(admin.TabularInline):
model = Member.tags.through
extra = 0
verbose_name = verbose_name_plural = '队员标签'
autocomplete_fields = ('tag',)
@admin.register(Member)
class MemberAdmin(admin.ModelAdmin):
list_display = ('realname', 'nickname', 'stu_id', 'department', 'grade', 'role')
list_filter = ('role', 'grade')
search_fields = ('realname', 'user__nickname', 'stu_id', 'role')
ordering = ('-user__date_joined',)
fields = (
'user',
'stu_id',
'realname',
'college',
'department',
'grade',
'cf_id',
'vj_id',
'role',
'need_peer',
'team',
)
autocomplete_fields = ('user', 'team')
inlines = (MemberTagInline,)
def nickname(self, obj):
return obj.user.nickname
nickname.short_description = '昵称'
@admin.register(Contribution)
class ContributeAdmin(admin.ModelAdmin):
list_display = ('title', 'typ', 'date', 'members_display')
ordering = ('-date',)
fields = ('title', 'typ', 'date', 'description', 'members')
autocomplete_fields = ('members',)
readonly_fields = ('date',)
def members_display(self, obj):
return list(member.realname for member in obj.members.all())
members_display.short_description = '贡献队员'
@admin.register(Team)
class TeamAdmin(admin.ModelAdmin):
list_display = ('name_ch', 'name_en', 'members')
search_fields = ('name_ch', 'name_en')
fields = ('name_ch', 'name_en')
def members(self, obj):
return list(member.realname for member in obj.members.all())
members.short_description = '队员'
@admin.register(Training)
class TrainingAdmin(admin.ModelAdmin):
list_display = ('member', 'clock_in', 'clock_out', 'duration')
fields = ('member', 'clock_in', 'clock_out', 'duration')
readonly_fields = ('member', 'clock_in', 'clock_out', 'duration')
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ('name',)
search_fields = ('name',)
fields = ('name',)
@admin.register(Achievement)
class AchievementAdmin(admin.ModelAdmin):
list_display = ('name', 'level')
search_fields = ('name',)
fields = ('name', 'level', 'detail', 'members')
```
#### File: management/commands/fakememberdata.py
```python
import os, json
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from user.models import User
from member.models import Team, Member
class Command(BaseCommand):
help = '重置或生成假user数据'
def handle(self, *args, **options):
commands_path = os.path.abspath(os.path.dirname(__file__))
with open(commands_path + '/fakememberdata.json', encoding='utf-8') as f:
fake_data = json.load(f)
for fake in fake_data['Team']:
team, created = Team.objects.update_or_create(
name_ch=fake['name_ch'],
defaults=fake
)
for fake in fake_data['Member']:
email = fake.pop('email')
if not User.objects.filter(email=email).exists():
raise CommandError(f'未找到email为{email}的user')
fake['user'] = User.objects.get(email=email)
member, created = Member.objects.update_or_create(
user__email=email,
defaults=fake
)
```
#### File: NWU-ACM-MIS-backend/member/serializers.py
```python
from rest_framework import serializers
from user.models import User
from user.serializers import UserSerializer, UserConciseSerializer
from member.models import Achievement, Member, Team
class MemberConciseSerializer(serializers.ModelSerializer):
user = UserConciseSerializer()
class Meta:
model = Member
fields = (
'user',
'role',
'realname',
'stu_id',
)
read_only_fields = fields
class TeamSerializer(serializers.ModelSerializer):
members = MemberConciseSerializer(many=True, read_only=True)
class Meta:
model = Team
fields = ('id', 'name_ch', 'name_en', 'members')
read_only_fields = ('id', 'members')
class AchievementsSerializer(serializers.ModelSerializer):
class Meta:
model = Achievement
fields = ('id', 'name', 'level', 'detail')
read_only_fields = fields
class MemberSerializer(serializers.ModelSerializer):
user = UserSerializer()
team = TeamSerializer(read_only=True)
tags = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
achievements = AchievementsSerializer(many=True, read_only=True)
class Meta:
model = Member
fields = (
'user',
'role',
'stu_id',
'realname',
'college',
'department',
'grade',
'cf_id',
'vj_id',
'need_peer',
'team',
'tags',
'achievements',
)
read_only_fields = (
'role',
'stu_id',
'realname',
'college',
'department',
'grade',
'team',
'tags',
'achievements',
)
def update(self, instance: Member, validated_data: dict):
user_data: dict = validated_data.pop('user', None)
if user_data is not None:
serializer = UserSerializer(instance.user, user_data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
return super().update(instance, validated_data)
```
#### File: NWU-ACM-MIS-backend/member/signals.py
```python
from django.db.models.signals import post_save
from django.dispatch import receiver
from user.models import User
from member.models import Member
@receiver(post_save, sender=Member, dispatch_uid="创建队员之后自动修改用户类型")
def _(sender, instance: Member=None, created=False, **kwargs):
if created:
instance.user.role = User.Role.MEMBER
instance.save()
```
#### File: NWU-ACM-MIS-backend/plan/admin.py
```python
import smtplib
from django.core.mail import send_mail
from django.contrib import admin
from django.shortcuts import HttpResponseRedirect
from markdownx.admin import MarkdownxModelAdmin
from NWU_ACM_MIS import settings
from member.models import Member
from plan.models import Plan, Announcement
mail_content_template = '''
我们发布了一个新的计划: {}
计划时间:
开始: {}
结束: {}
以下是详细信息:
{}
欢迎参加,
{}
'''
@admin.register(Plan)
class PlanAdmin(admin.ModelAdmin):
list_display = ('name', 'typ', 'clock_in', 'clock_out', 'has_sent')
ordering = ('-clock_in',)
add_fieldsets = (
(None, {'fields': (
'name',
'typ',
'detail',
'clock_in',
'clock_out',
)}),
)
fieldsets = add_fieldsets + (
('邮件通知', {'fields': (
'members',
'has_sent',
)}),
)
readonly_fields = ('has_sent',)
autocomplete_fields = ('members',)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super().get_fieldsets(request, obj)
def response_change(self, request, obj:Plan):
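# The '_addallads', '_addallnovices' and '_sendemail' keys checked below are assumed to
# match the names of extra submit buttons added in a customized admin change-form
# template (that template is not part of this file).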
if '_addallads' in request.POST:
obj.save()
obj.members.add(*Member.objects.filter(role=Member.Role.AD))
self.message_user(request, '添加了所有的现役队员')
return HttpResponseRedirect('.')
if '_addallnovices' in request.POST:
obj.save()
obj.members.add(*Member.objects.filter(role=Member.Role.NOVICE))
self.message_user(request, '添加了所有的萌新队员')
return HttpResponseRedirect('.')
if '_sendemail' in request.POST:
obj.has_sent = True
obj.save()
failed_members = []
mail_content = mail_content_template.format(
obj.name, obj.clock_in, obj.clock_out, obj.detail,
settings.PROJECT_VERBOSE_NAME
)
for member in obj.members.all():
try:
send_mail(
obj.name,
f'您好,{member.realname}' + mail_content,
settings.EMAIL_FROM,
recipient_list=[member.user.email, ],
fail_silently=False,
)
except smtplib.SMTPException:
failed_members.append(member)
message = '己发送邮件'
if failed_members:
message += f', 其中给{failed_members}发送时失败'
self.message_user(request, message)
return HttpResponseRedirect('.')
return super().response_change(request, obj)
@admin.register(Announcement)
class AnnouncementAdmin(MarkdownxModelAdmin):
list_display = ('title', 'created_date', 'changed_date')
ordering = ('-created_date',)
fields = ('title', 'content', 'created_date', 'changed_date')
readonly_fields = ('created_date', 'changed_date')
```
#### File: NWU-ACM-MIS-backend/plan/views.py
```python
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from plan.models import Announcement
from plan.serializers import (
AnnouncementSerializer, AnnouncementSimpleSerializer
)
class AnnouncementView(APIView):
"""公告"""
permission_classes = []
def get(self, request, id=None):
"""查询"""
if id is None:
annos = Announcement.objects.all()
serializer = AnnouncementSimpleSerializer(annos, many=True)
else:
anno = get_object_or_404(Announcement, id=id)
serializer = AnnouncementSerializer(anno)
return Response(serializer.data, status=status.HTTP_200_OK)
``` |