hexsha
stringlengths 40
40
| size
int64 3
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
972
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
972
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
972
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 3
1.03M
| avg_line_length
float64 1.13
941k
| max_line_length
int64 2
941k
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b42fb1dd536d22723ae90ddff8288dfe9cda8bbe
| 219 |
py
|
Python
|
blog/models.py
|
rpopov94/portfolio-project
|
60e3c59e41dee1dff314727b8071c7c041fad9f8
|
[
"Apache-2.0"
] | null | null | null |
blog/models.py
|
rpopov94/portfolio-project
|
60e3c59e41dee1dff314727b8071c7c041fad9f8
|
[
"Apache-2.0"
] | null | null | null |
blog/models.py
|
rpopov94/portfolio-project
|
60e3c59e41dee1dff314727b8071c7c041fad9f8
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Blog(models.Model):
    """A single blog post: a title, free-form body text, and a publication date."""
    title = models.CharField(max_length=200)
    description = models.TextField()
    date = models.DateField()
    def __str__(self):
        # Human-readable label shown in the Django admin and shell.
        return self.title
| 21.9 | 44 | 0.689498 |
d045996d2ac27ed41f3ba7f9dde8752714a9ddc2
| 519 |
py
|
Python
|
objectModel/Python/cdm/utilities/import_info.py
|
rt112000/CDM
|
34bd34f9260140a8f8aa02bd87c23033f3daad4c
|
[
"CC-BY-4.0",
"MIT"
] | 884 |
2019-05-10T02:09:10.000Z
|
2022-03-31T14:02:00.000Z
|
objectModel/Python/cdm/utilities/import_info.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | 171 |
2019-06-10T11:34:37.000Z
|
2022-03-31T22:50:12.000Z
|
objectModel/Python/cdm/utilities/import_info.py
|
spbast/CDM
|
bf97a3720c97ee4c9df3625084cf8b3bc65ff9c7
|
[
"CC-BY-4.0",
"MIT"
] | 340 |
2019-05-07T18:00:16.000Z
|
2022-03-31T12:00:15.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
class ImportInfo:
    """Contains information about an import in the imports priority map."""
    def __init__(self, priority: int, is_moniker: bool) -> None:
        # The priority that the import has with respect to the document where it is imported.
        self.priority = priority
        # If the import has a moniker or not.
        self.is_moniker = is_moniker
| 37.071429 | 94 | 0.714836 |
693f45705fdbe0a59ca0f04322f18881b1b8db78
| 1,857 |
py
|
Python
|
share/qt/extract_strings_qt.py
|
emixil/emixil
|
7cc0833f5290541d29eaf7fe5be3341235212512
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
emixil/emixil
|
7cc0833f5290541d29eaf7fe5be3341235212512
|
[
"MIT"
] | null | null | null |
share/qt/extract_strings_qt.py
|
emixil/emixil
|
7cc0833f5290541d29eaf7fe5be3341235212512
|
[
"MIT"
] | 20 |
2021-02-10T06:55:21.000Z
|
2021-02-23T21:02:43.000Z
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
# Generated C++ file that will hold the translatable string table.
OUT_CPP="qt/emixilstrings.cpp"
# The empty msgid marks the .po header entry; such entries are skipped.
EMPTY=['""']
def parse_po(text):
    """
    Parse 'po' format produced by xgettext.
    Return a list of (msgid,msgstr) tuples.
    """
    entries = []
    current_id = []
    current_str = []
    reading_id = False
    reading_str = False
    for raw_line in text.split('\n'):
        line = raw_line.rstrip('\r')
        if line.startswith('msgid '):
            # A new message begins; flush the previous one if it was complete.
            if reading_str:
                entries.append((current_id, current_str))
                reading_str = False
            reading_id = True
            current_id = [line[6:]]
        elif line.startswith('msgstr '):
            reading_id = False
            reading_str = True
            current_str = [line[7:]]
        elif line.startswith('"'):
            # Bare quoted lines continue whichever section is currently open.
            if reading_id:
                current_id.append(line)
            if reading_str:
                current_str.append(line)
    # Flush the trailing message, if any.
    if reading_str:
        entries.append((current_id, current_str))
    return entries
# Source files to scan come from the command line.
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
# Run xgettext and capture the generated .po text from stdout.
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
# NOTE(review): on Python 3, `out` is bytes and parse_po(out) would fail at
# out.split('\n'); this script appears to target Python 2 — confirm before porting.
messages = parse_po(out)
f = open(OUT_CPP, 'w')
# C++ preamble: define UNUSED only where the GCC attribute is available.
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *emixil_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
    # Skip the empty msgid, which is the .po file header entry.
    if msgid != EMPTY:
        f.write('QT_TRANSLATE_NOOP("emixil-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| 23.506329 | 79 | 0.5972 |
a7fa57dd922ffb1c2cd5e115642b41ec755f38c1
| 7,372 |
py
|
Python
|
data/external/repositories/154909/march_madness-master/team.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/154909/march_madness-master/team.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/154909/march_madness-master/team.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1 |
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
"""
The polls that have the most data, in order
"SAG" "12" "6114.0"
"MOR" "12" "6031.5"
"POM" "12" "5823.91666666667"
"WLK" "12" "4862.16666666667"
"BOB" "12" "4700.25"
"DOL" "12" "4594.16666666667"
"COL" "12" "4252.08333333333"
"RPI" "12" "4039.0"
"WOL" "12" "3966.58333333333"
"RTH" "12" "3922.0"
"USA" "12" "454.666666666667"
"AP" "12" "454.416666666667"
"""
POLLS = ('SAG', 'MOR', 'POM', 'WLK', 'BOB', 'DOL', 'COL', 'RPI', 'WOL', 'RTH', 'USA', 'AP')
STATS = (
'fga',
'fgm',
'fta',
'ftm',
'fga3',
'fgm3',
'stl',
'ast',
'dr',
'or',
'blk',
'pf',
'to'
)
import numpy
from data import DataHandler
class AggregatorCollector:
    """Holds Aggregator objects keyed by label and fans each game update out to all of them."""
    def __init__(self, aggregators):
        self.aggregators = {}
        for aggregator in aggregators:
            # Later aggregators with a duplicate label overwrite earlier ones.
            self.aggregators[aggregator.label] = aggregator
    def __getitem__(self, item):
        return self.aggregators[item]
    def update(self, game, team_obj):
        # 'w'/'l' prefix selects the winner/loser columns of this game row.
        prefix = 'w' if game['wteam'] == team_obj.id else 'l'
        # BUG FIX: .values() instead of the Python-2-only .itervalues(); behaves
        # identically on Python 2 and keeps the code runnable on Python 3.
        for aggregator in self.aggregators.values():
            aggregator.update(game, team_obj, prefix)
class Aggregator:
    """Accumulates one per-game statistic for a team, resetting at season boundaries."""
    def __init__(self, label, func):
        self.label = label
        self._func = func
        # Season currently being accumulated; None until the first update.
        self.season = None
        self.val = []
    def reset(self):
        # Drop all accumulated values (invoked whenever a new season starts).
        self.val = []
    def update(self, game, team_obj, prefix):
        season = game['season']
        if season != self.season:
            # Season changed: remember it and start from an empty history.
            self.season = season
            self.reset()
        self.val.append(self._func(game, team_obj, prefix, self.val))
    @property
    def value(self):
        # Median of all values accumulated *before* the most recent game.
        return numpy.median(self.val[:-1])
def wins(game, team, prefix, val):
    """Running win count: previous total plus one if this game was won.

    NOTE(review): the original did ``val + int(...)`` where Aggregator passes
    ``val`` as its accumulated *list*, which raises TypeError. The running
    total is taken as the last accumulated value (0 when empty).
    """
    return (val[-1] if val else 0) + int(prefix == 'w')
def losses(game, team, prefix, val):
    """Running loss count: previous total plus one if this game was lost.

    NOTE(review): the original did ``val + int(...)`` where Aggregator passes
    ``val`` as its accumulated *list*, which raises TypeError. The running
    total is taken as the last accumulated value (0 when empty).
    """
    return (val[-1] if val else 0) + int(prefix == 'l')
def stat_agg(stat, against=False):
    """Build an aggregator function returning the raw box-score *stat* for the
    team (or, when *against* is True, for its opponent)."""
    def agg(game, team, prefix, val):
        side = prefix
        if against:
            # Flip perspective so we read the opponent's column.
            side = "l" if side == "w" else "w"
        return game["{:s}{:s}".format(side, stat)]
    return agg
def pct_agg(num_stat, denom_stat, against=False):
    """Build an aggregator returning num_stat / denom_stat for the team (or its
    opponent when *against*); the denominator is floored at 1 to avoid division by zero."""
    def agg(game, team, prefix, val):
        side = ("l" if prefix == "w" else "w") if against else prefix
        numerator = float(game["{:s}{:s}".format(side, num_stat)])
        denominator = max(1.0, float(game["{:s}{:s}".format(side, denom_stat)]))
        return numerator / denominator
    return agg
class Team:
    """Lazy-loading wrapper around one team's games, poll ranks, and aggregate features.

    NOTE(review): written for Python 2 — `numpy.array(ranks.values())` relies on
    dict.values() returning a list; confirm before running under Python 3.
    """
    def __init__(self, team_id):
        self.id = team_id
        self._db = DataHandler()
        # Lazily populated caches backing the properties below.
        self._data = None
        self._ranks = None
        self._name = None
        self._features = None
        self._start_rank = {}
        # NOTE(review): duplicate labels ('fgpct', 'fgpct3', 'ftpct') mean the
        # "against" variants overwrite the "for" variants in the collector's dict.
        self.aggregator = AggregatorCollector([Aggregator(stat, stat_agg(stat, False)) for stat in STATS] +\
                                              [Aggregator('fgpct', pct_agg('fga', 'fgm')),
                                               Aggregator('fgpct3', pct_agg('fga3', 'fgm3')),
                                               Aggregator('ftpct', pct_agg('fta', 'ftm')),
                                               Aggregator('fgpct', pct_agg('fga', 'fgm', True)),
                                               Aggregator('fgpct3', pct_agg('fga3', 'fgm3', True)),
                                               Aggregator('ftpct', pct_agg('fta', 'ftm', True))] +
                                              [Aggregator('wpct', lambda g, t, p, tv: int(p == 'w'))])
    @property
    def ranks(self):
        # All poll rankings for this team, ordered by season then rating day (cached).
        if self._ranks is None:
            with self._db.connector() as cur:
                cur.execute("""
                SELECT
                    orank, season, rating_day_num, sys_name
                FROM
                    massey_ordinals
                WHERE
                    team = ?
                AND
                    sys_name IN ({:s})
                ORDER BY
                    season, rating_day_num""".format(",".join("'{:s}'".format(poll) for poll in POLLS)), (self.id,))
                self._ranks = list(cur)
        return self._ranks
    @property
    def data(self):
        # All regular-season games involving this team, ordered chronologically (cached).
        if self._data is None:
            with self._db.connector() as cur:
                cur.execute("""
                SELECT
                    *
                FROM
                    regular_season_detailed_results
                WHERE
                    (wteam = ? OR lteam = ?)
                ORDER BY
                    season, daynum""", (self.id, self.id))
                self._data = list(cur)
        return self._data
    def is_after_first_n_games(self, game, n):
        # True when more than n games of the same season precede `game`.
        return sum(1 for j in self.data if j['season'] == game['season'] and j['daynum'] < game['daynum']) > n
    def get_rank_during_game(self, game):
        # Log of the median rank across polls, using each poll's latest pre-game rating.
        ranks = {}
        for row in self.ranks:
            if row['season'] == game['season']:
                if row['rating_day_num'] < game['daynum']:
                    ranks[row['sys_name']] = row['orank']
        ranks = numpy.array(ranks.values())
        ranks = ranks[ranks > 0]
        if len(ranks) == 0:
            return numpy.log(351) # highest possible rank
        return numpy.log(numpy.median(ranks))
    def start_rank(self, season):
        # Log of the median of each poll's *first* rating of the season (cached per season).
        if season not in self._start_rank:
            ranks = {}
            for row in self.ranks:
                if row['season'] == season:
                    if row['sys_name'] not in ranks:
                        ranks[row['sys_name']] = row['orank']
            ranks = numpy.array(ranks.values())
            ranks = ranks[ranks > 0]
            if len(ranks) == 0:
                self._start_rank[season] = numpy.log(351)
            else:
                self._start_rank[season] = numpy.log(numpy.median(ranks))
        return self._start_rank[season]
    def _get_wins(self, game):
        # Number of wins earlier in the same season than `game`.
        return sum(int(row['wteam'] == self.id) for row in self.data if
                   row['season'] == game['season'] and row['daynum'] < game['daynum'])
    @property
    def name(self):
        # Team display name looked up once from the teams table.
        if self._name is None:
            with self._db.connector() as cur:
                cur.execute("""SELECT team_name FROM teams WHERE team_id = ?""", (self.id,))
                self._name = list(cur)[0][0]
        return self._name
    @property
    def features(self):
        # (season, daynum) -> feature vector, built once over all games; games
        # within a season's first 5 are used to warm the aggregators but skipped.
        if self._features is None:
            self._features = {}
            for game in self.data:
                self.aggregator.update(game, self)
                if self.is_after_first_n_games(game, 5):
                    aggs = self.aggregator.aggregators
                    key = (game['season'], game['daynum'])
                    start_rank = self.start_rank(game['season'])
                    game_rank = self.get_rank_during_game(game)
                    rank_ratio = numpy.log1p(numpy.exp(game_rank)) / numpy.log1p(numpy.exp(start_rank))
                    self._features[key] = [start_rank, game_rank, rank_ratio, self._get_wins(game)] +\
                                          [agg.value for agg in aggs.values()]
        return self._features
    def __repr__(self):
        return "Team {:d}".format(self.id)
    def __str__(self):
        return self.name
if __name__ == '__main__':
    # Smoke test: dump features and aggregator values for one team.
    team = Team(1314)
    print(str(team))
    for k, v in sorted(team.features.items()):
        print(k, v)
    # BUG FIX: .items() instead of the Python-2-only .iteritems(); identical
    # behavior on Python 2, and the demo also runs on Python 3.
    for key, value in team.aggregator.aggregators.items():
        print(key, value.value)
| 32.475771 | 120 | 0.504748 |
a1dbbf1ee9431d76d12bd8437d5b4556679ddc66
| 1,496 |
py
|
Python
|
stacks-and-queues/python/stack_of_plates.py
|
DatGreekChick/ctci
|
b165bd529fe3ca32cc5c95ea274080bd5689501d
|
[
"MIT"
] | null | null | null |
stacks-and-queues/python/stack_of_plates.py
|
DatGreekChick/ctci
|
b165bd529fe3ca32cc5c95ea274080bd5689501d
|
[
"MIT"
] | null | null | null |
stacks-and-queues/python/stack_of_plates.py
|
DatGreekChick/ctci
|
b165bd529fe3ca32cc5c95ea274080bd5689501d
|
[
"MIT"
] | null | null | null |
# 3.3: Stack of Plates
from stack import Node
# Runtime: O(1) - Space: O(1)
class StackInfo:
    """Bookkeeping for one sub-stack: fixed capacity, current size, and top node."""
    def __init__(self):
        self.top = None      # head node of this sub-stack's linked list
        self.size = 0        # number of plates currently stacked
        self.capacity = 3    # maximum plates per sub-stack
class SetOfStacks:
    """A stack of plates that spills into a new sub-stack when the current one is full."""
    def __init__(self):
        self.stacks = []
    def print(self):
        # Dump each sub-stack's top value and size, for debugging.
        for idx, stack in enumerate(self.stacks):
            print(
                f"stack # {idx}: top: {stack.top.value} | stack size: {stack.size}"
            )
    def push(self, value):
        node = Node(value)
        if not self.stacks:
            # First ever push: create the initial sub-stack.
            first = StackInfo()
            node.next = first.top
            first.size += 1
            first.top = node
            self.stacks.append(first)
        elif self.stacks[-1].size < self.stacks[-1].capacity:
            # Room left on the current sub-stack: link in front of its top.
            current = self.stacks[-1]
            node.next = current.top
            current.size += 1
            current.top = node
        else:
            # Current sub-stack is full: start a fresh one.
            overflow = StackInfo()
            overflow.size += 1
            overflow.top = node
            self.stacks.append(overflow)
    def pop(self):
        last = self.stacks[-1]
        last.size -= 1
        node = last.top
        if node.next:
            last.top = node.next
        else:
            # Sub-stack exhausted: discard it entirely.
            self.stacks.pop()
        return node
# Demo: push five plates (overflowing into a second sub-stack), then pop them
# all, printing the structure after every operation.
stack = SetOfStacks()
stack.push(5)
stack.push(3)
stack.push(2)
stack.push(6)
stack.push(1)
stack.print()
print("popped:", stack.pop().value)
stack.print()
print("popped:", stack.pop().value)
stack.print()
print("popped:", stack.pop().value)
stack.print()
print("popped:", stack.pop().value)
stack.print()
print("popped:", stack.pop().value)
stack.print()
| 21.070423 | 83 | 0.550802 |
df008425b89b2749e4f23a8d6b6eec6145ebe308
| 3,735 |
py
|
Python
|
src/apps/user_operation/serializers.py
|
fuxinhan/worker
|
7e8fb8dd2e0210bf12d9fd9290805d00ad895e09
|
[
"MIT"
] | null | null | null |
src/apps/user_operation/serializers.py
|
fuxinhan/worker
|
7e8fb8dd2e0210bf12d9fd9290805d00ad895e09
|
[
"MIT"
] | null | null | null |
src/apps/user_operation/serializers.py
|
fuxinhan/worker
|
7e8fb8dd2e0210bf12d9fd9290805d00ad895e09
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from .models import UserEnroll, SalesInfo, UserFavoriteCourse, UserCourse
from .models import UserDeadline, UserFavoriteOrg, UserFavoriteSketch
from courses.serializers import CourseSerializer
from sketch.models import Sketch
class UserEnrollDetailSerializer(serializers.ModelSerializer):
    """Read serializer exposing every UserEnroll field."""
    class Meta:
        model = UserEnroll
        fields = "__all__"
class UserEnrollSerializer(serializers.ModelSerializer):
    """UserEnroll serializer with a read-only, formatted creation timestamp."""
    add_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M')
    class Meta:
        model = UserEnroll
        fields = ("id", "name", "birthday", "gender", "mobile", "address", "school", "enroll", "msg_ad", "remark", "add_time")
class SalesInfoDetailSerializer(serializers.ModelSerializer):
    """Detail serializer nesting the related course and student objects."""
    course = CourseSerializer(many=False)
    student = UserEnrollSerializer(many=False)
    class Meta:
        model = SalesInfo
        fields = "__all__"
class SalesInfoSerializer(serializers.ModelSerializer):
    """Flat SalesInfo serializer with a read-only, formatted creation timestamp."""
    add_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M')
    class Meta:
        model = SalesInfo
        fields = "__all__"
class UserFavCourseDetailSerializer(serializers.ModelSerializer):
    """Read serializer for a favorited course, nesting the full course details."""
    course = CourseSerializer()
    class Meta:
        model = UserFavoriteCourse
        fields = ("course", "id")
class UserFavCourseSerializer(serializers.ModelSerializer):
    """Create serializer for favoriting a course; the user comes from the request."""
    user = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    class Meta:
        model = UserFavoriteCourse
        # Reject a duplicate (user, course) pair with a friendly message.
        validators = [
            UniqueTogetherValidator(
                queryset=UserFavoriteCourse.objects.all(),
                fields=('user', 'course'),
                message="已经收藏"
            )
        ]
        fields = ("user", "course", "id")
class UserSketchSerializer(serializers.ModelSerializer):
    """Serializer for Sketch objects; the owning user comes from the request."""
    user = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    class Meta:
        model = Sketch
        fields = "__all__"
class UserCourseDetailSerializer(serializers.ModelSerializer):
    """Read serializer for a purchased course, nesting the full course details."""
    course = CourseSerializer(many=False)
    class Meta:
        model = UserCourse
        fields = "__all__"
class UserCourseSerializer(serializers.ModelSerializer):
    """Create serializer for a course purchase; one purchase allowed per (user, course)."""
    user = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    class Meta:
        model = UserCourse
        validators = [
            UniqueTogetherValidator(
                queryset=UserCourse.objects.all(),
                fields=('user', 'course'),
                message="课程已经购买"
            )
        ]
        fields = "__all__"
class UserDeadlineSerializer(serializers.ModelSerializer):
    """Serializer for a user's deadline: extends an existing record by 30 days
    past the submitted timestamp, or creates a new one for first-time users."""
    user = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    add_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M')
    # expired_time = serializers.DateTimeField(read_only=True, format='%Y-%m-%d %H:%M')
    def create(self, validated_data):
        user = self.context["request"].user
        deadline = validated_data["expired_time"]
        expired_time = datetime.strptime(deadline, "%Y-%m-%d %H:%M")
        existing = UserDeadline.objects.filter(user=user).first()
        if existing is not None:
            # Extend the stored deadline to 30 days past the submitted timestamp.
            existing.expired_time = datetime.strftime(expired_time + timedelta(days=30), "%Y-%m-%d")
            existing.save()
        else:
            # BUG FIX: the original assigned attributes on an empty QuerySet
            # (AttributeError) and used `print(...) % value`, which fails on
            # Python 3; create a fresh row for first-time users instead.
            existing = UserDeadline(user=user, expired_time=expired_time)
            existing.save()
        return existing
    class Meta:
        model = UserDeadline
        fields = "__all__"
| 28.730769 | 126 | 0.65328 |
4c2cd760704ab81ae22ce54c1aa7f099a4ec141e
| 108 |
py
|
Python
|
fileupload/admin.py
|
myacera100/ai-django-fileupload
|
25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec
|
[
"MIT"
] | 1 |
2017-11-06T14:55:28.000Z
|
2017-11-06T14:55:28.000Z
|
fileupload/admin.py
|
myacera100/ai-django-fileupload
|
25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec
|
[
"MIT"
] | 1 |
2017-11-06T14:55:24.000Z
|
2017-11-06T14:55:24.000Z
|
fileupload/admin.py
|
myacera100/ai-django-fileupload
|
25c0cbd10c93a4556cec82cc5ccaf3e21e0335ec
|
[
"MIT"
] | 1 |
2021-04-05T03:41:25.000Z
|
2021-04-05T03:41:25.000Z
|
from fileupload.models import Attachment
from django.contrib import admin
# Expose Attachment objects in the Django admin using the default ModelAdmin.
admin.site.register(Attachment)
| 18 | 40 | 0.842593 |
51d19799412ffe877815b7e7705d82e9eec15e1e
| 666 |
py
|
Python
|
PyUdemy/Day10/DaysInMoth.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
PyUdemy/Day10/DaysInMoth.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
PyUdemy/Day10/DaysInMoth.py
|
JoseArtur/phyton-exercices
|
f3da4447044e445222233960f991fb2e36311131
|
[
"MIT"
] | null | null | null |
def is_leap(year):
    """Classify *year* under the Gregorian calendar rules.

    Returns the string "Leap year." or "Not leap year.".
    """
    # Leap when divisible by 4, except centuries not divisible by 400.
    leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return "Leap year." if leap else "Not leap year."
def days_in_month(year, month):
    """Return the number of days in *month* (1-12) of *year*, handling leap years.

    BUG FIX: the original compared is_leap()'s result against "Leap Year",
    which never matches the actual return value "Leap year.", so February
    was always reported as 28 days. The leap check is now done directly.
    """
    month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    if month == 2 and year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        return 29
    return month_days[month - 1]
#🚨 Do NOT change any of the code below
# Simple CLI driver: read a year and a month, print that month's day count.
year = int(input("Enter a year: "))
month = int(input("Enter a month: "))
days = days_in_month(year, month)
print(days)
| 22.2 | 65 | 0.582583 |
49ead08d68781124b61c2a0d1621659f11ca3922
| 1,556 |
py
|
Python
|
var/spack/repos/builtin/packages/r-distributional/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 |
2017-11-06T08:47:01.000Z
|
2022-03-31T14:45:33.000Z
|
var/spack/repos/builtin/packages/r-distributional/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 |
2017-11-04T07:49:45.000Z
|
2022-03-31T23:38:39.000Z
|
var/spack/repos/builtin/packages/r-distributional/package.py
|
LiamBindle/spack
|
e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 |
2017-11-04T07:45:50.000Z
|
2022-03-30T14:31:53.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RDistributional(RPackage):
    """Functions for Base Types and Core R and 'Tidyverse' Features.
    Vectorised distribution objects with tools for manipulating, visualising,
    and using probability distributions. Designed to allow model prediction
    outputs to return distributions rather than their parameters, allowing
    users to directly interact with predictive distributions in a
    data-oriented workflow. In addition to providing generic replacements for
    p/d/q/r functions, other useful statistics can be computed including
    means, variances, intervals, and highest density regions."""
    homepage = "https://github.com/mitchelloharawild/distributional"
    cran = "distributional"
    # CRAN release pinned by its sha256 checksum.
    version('0.2.2', sha256='028e5a91aabe3a676eb7b7f3dc907f7f34735a123fe0d9adcabc03476504435f')
    # R package dependencies required at both build and run time.
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('r-generics', type=('build', 'run'))
    depends_on('r-ellipsis', type=('build', 'run'))
    depends_on('r-numderiv', type=('build', 'run'))
    depends_on('r-ggplot2', type=('build', 'run'))
    depends_on('r-scales', type=('build', 'run'))
    depends_on('r-farver', type=('build', 'run'))
    depends_on('r-digest', type=('build', 'run'))
    depends_on('r-lifecycle', type=('build', 'run'))
| 44.457143 | 95 | 0.71144 |
18431c465aba5d45966a5e7ab4f24ffab6bd0bfd
| 983 |
py
|
Python
|
tests/entrypoints/test_fix_command.py
|
HiteshMah-Jan/thefuck
|
132c62262246824470934c2c6f46919ef6f00203
|
[
"MIT"
] | 75,504 |
2015-04-08T18:22:19.000Z
|
2022-03-31T23:59:52.000Z
|
tests/entrypoints/test_fix_command.py
|
parulgarg123/thefuck
|
d10fc80fa5da5724ec945aec12720ef41be9551f
|
[
"MIT"
] | 1,160 |
2015-04-17T18:47:12.000Z
|
2022-03-30T20:42:26.000Z
|
tests/entrypoints/test_fix_command.py
|
parulgarg123/thefuck
|
d10fc80fa5da5724ec945aec12720ef41be9551f
|
[
"MIT"
] | 4,399 |
2015-04-17T18:36:04.000Z
|
2022-03-31T07:01:03.000Z
|
import pytest
from mock import Mock
from thefuck.entrypoints.fix_command import _get_raw_command
class TestGetRawCommand(object):
    """Unit tests for thefuck.entrypoints.fix_command._get_raw_command."""
    def test_from_force_command_argument(self):
        # An explicit force_command argument wins over everything else.
        known_args = Mock(force_command=['git', 'brunch'])
        assert _get_raw_command(known_args) == ['git', 'brunch']
    def test_from_command_argument(self, os_environ):
        # Without TF_HISTORY, the command argument is returned as-is.
        os_environ['TF_HISTORY'] = None
        known_args = Mock(force_command=None,
                          command=['sl'])
        assert _get_raw_command(known_args) == ['sl']
    @pytest.mark.parametrize('history, result', [
        ('git br', 'git br'),
        ('git br\nfcuk', 'git br'),
        ('git br\nfcuk\nls', 'ls'),
        ('git br\nfcuk\nls\nfuk', 'ls')])
    def test_from_history(self, os_environ, history, result):
        # Falls back to the most recent non-"fuck-like" entry in TF_HISTORY.
        os_environ['TF_HISTORY'] = history
        known_args = Mock(force_command=None,
                          command=None)
        assert _get_raw_command(known_args) == [result]
| 36.407407 | 64 | 0.62767 |
df6b621271700b8f6ca72d2fdb40f4fe37f62a64
| 5,681 |
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/tlveditor/restriction_e362d0ce9d693ee94a071e4f973da1d3.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/tlveditor/restriction_e362d0ce9d693ee94a071e4f973da1d3.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
ixnetwork_restpy/testplatform/sessions/ixnetwork/globals/topology/tlveditor/restriction_e362d0ce9d693ee94a071e4f973da1d3.py
|
Vibaswan/ixnetwork_restpy
|
239fedc7050890746cbabd71ea1e91c68d9e5cad
|
[
"MIT"
] | null | null | null |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Restriction(Base):
    """Choices for field value
    The Restriction class encapsulates a list of restriction resources that are managed by the user.
    A list of resources can be retrieved from the server using the Restriction.find() method.
    The list can be managed by using the Restriction.add() and Restriction.remove() methods.
    """
    __slots__ = ()
    _SDM_NAME = 'restriction'
    # Maps Python property names to the server-side REST attribute names.
    _SDM_ATT_MAP = {
        'Enum': 'enum',
        'SingleValue': 'singleValue',
    }
    def __init__(self, parent):
        super(Restriction, self).__init__(parent)
    @property
    def Enum(self):
        """
        Returns
        -------
        - str: Internal enumeration type to be used as value options
        """
        return self._get_attribute(self._SDM_ATT_MAP['Enum'])
    @Enum.setter
    def Enum(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Enum'], value)
    @property
    def SingleValue(self):
        """
        Returns
        -------
        - bool: Restricts the field to single value pattern without overlays
        """
        return self._get_attribute(self._SDM_ATT_MAP['SingleValue'])
    @SingleValue.setter
    def SingleValue(self, value):
        self._set_attribute(self._SDM_ATT_MAP['SingleValue'], value)
    def update(self, Enum=None, SingleValue=None):
        """Updates restriction resource on the server.
        Args
        ----
        - Enum (str): Internal enumeration type to be used as value options
        - SingleValue (bool): Restricts the field to single value pattern without overlays
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, Enum=None, SingleValue=None):
        """Adds a new restriction resource on the server and adds it to the container.
        Args
        ----
        - Enum (str): Internal enumeration type to be used as value options
        - SingleValue (bool): Restricts the field to single value pattern without overlays
        Returns
        -------
        - self: This instance with all currently retrieved restriction resources using find and the newly added restriction resources available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
    def remove(self):
        """Deletes all the contained restriction resources in this instance from the server.
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        self._delete()
    def find(self, Enum=None, SingleValue=None):
        """Finds and retrieves restriction resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve restriction resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all restriction resources from the server.
        Args
        ----
        - Enum (str): Internal enumeration type to be used as value options
        - SingleValue (bool): Restricts the field to single value pattern without overlays
        Returns
        -------
        - self: This instance with matching restriction resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieves a single instance of restriction data from the server.
        Args
        ----
        - href (str): An href to the instance to be retrieved
        Returns
        -------
        - self: This instance with the restriction resources from the server available through an iterator or index
        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| 38.385135 | 172 | 0.676993 |
9f8b9a64a82f765eb41c7861aeb24f2e136a2dfd
| 2,242 |
py
|
Python
|
utest/usages/test_resource_usages.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2019-06-27T08:48:24.000Z
|
2019-06-27T08:48:24.000Z
|
utest/usages/test_resource_usages.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
utest/usages/test_resource_usages.py
|
veryl-technologies/t24-tests-ide
|
16cd803895916a785c0e1fec3f71f9388c21edc9
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import unittest
import datafilereader
from robot.utils.asserts import assert_equals
from robotide.usages.commands import FindResourceUsages
class ResourceUsageTests(unittest.TestCase):
    """Tests for finding where a resource file is used across test suites."""
    # NOTE! The data is shared among tests
    # This is for performance reasons but be warned when you add tests!
    @classmethod
    def setUpClass(cls):
        # Load one project tree once and reuse it for every test in this class.
        cls.ctrl = datafilereader.construct_chief_controller(datafilereader.SIMPLE_TEST_SUITE_PATH)
        cls.ts1 = datafilereader.get_ctrl_by_name('TestSuite1', cls.ctrl.datafiles)
        cls.ts2 = datafilereader.get_ctrl_by_name('TestSuite2', cls.ctrl.datafiles)
        cls.resu = datafilereader.get_ctrl_by_name(datafilereader.SIMPLE_TEST_SUITE_RESOURCE_NAME, cls.ctrl.datafiles)
    @classmethod
    def tearDownClass(cls):
        cls.ctrl.close()
    def test_resource_import_knows_imported_resource_controller(self):
        assert_equals(self.resu, self.ts1.imports[0].get_imported_controller())
        assert_equals(self.resu, self.ts2.imports[0].get_imported_controller())
    def test_resource_usages_finding(self):
        # Both suites import the resource, so both must appear in the usages.
        usages = list(self.resu.execute(FindResourceUsages()))
        self._verify_length(2, usages)
        self._verify_that_contains(self.ts1, usages)
        self._verify_that_contains(self.ts2, usages)
    def _verify_length(self, expected, usages):
        assert_equals(len(usages), expected)
    def _verify_that_contains(self, item, usages):
        # A usage matches when it points at the item's imports and shares its display name.
        for u in usages:
            if u.item == item.imports:
                if item.display_name != u.name:
                    raise AssertionError('Name "%s" was not expected "%s"!' % (u.name, item.display_name))
                return
        raise AssertionError('Item %r not in usages %r!' % (item, usages))
    def test_import_in_resource_file(self):
        # Resource files can themselves import resources; those usages count too.
        inner_resu = self.resu.imports[0].get_imported_controller()
        usages = list(inner_resu.execute(FindResourceUsages()))
        self._verify_length(1, usages)
        self._verify_that_contains(self.resu, usages)
    def test_none_existing_import(self):
        # Importing a missing file yields no controller rather than raising.
        imp = self.ts1.imports.add_resource('this_does_not_exists.txt')
        assert_equals(imp.get_imported_controller(), None)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| 40.763636 | 118 | 0.711864 |
59d7d2667eed71f55f7bf8d92cce87447657a097
| 806 |
py
|
Python
|
garbevents/settings.py
|
QingTian203/garbevents
|
b3a5e8b3a08ae9a197ae0c17d78acf53b1cc4350
|
[
"MIT"
] | 74 |
2020-07-09T13:55:08.000Z
|
2022-03-29T07:11:00.000Z
|
garbevents/settings.py
|
wuzhiqiang15/garbevents
|
76cd28ea0db8364b8e5ef2c6523f630f97a92f12
|
[
"MIT"
] | 10 |
2020-08-19T06:42:10.000Z
|
2022-03-16T10:46:25.000Z
|
garbevents/settings.py
|
wuzhiqiang15/garbevents
|
76cd28ea0db8364b8e5ef2c6523f630f97a92f12
|
[
"MIT"
] | 25 |
2020-07-14T11:01:01.000Z
|
2022-03-17T11:06:18.000Z
|
# -*- coding: utf-8 -*-
"""
Parameters are initialized as module-level (global) defaults. Before calling the
relevant APIs, inherit the Settings class and set the corresponding parameters.
"""
class Settings(object):
    """Global configuration holder; subclass or assign on it before calling the APIs.

    Example usage (comments translated from the original Chinese):

    # tracking-event upload URL
    ST.url = 'https://www.baidu.com/'
    # report output path
    ST.report_path = 'report'
    # names of all tracked events
    ST.all_events = ['event_name_1', 'event_name_2']
    # API endpoint fragments to intercept
    ST.interface_url = ['apipool', 'APIPOOL']
    # mock JSON payload
    ST.mock_json = {}
    # per-event property configuration
    ST.events_properties = {
        'graphic_switch': ['status_update', 'doc_set_num', 'doc_set_price', 'doc_set_sever_time']
    }
    """
    report_path = 'report'  # by default a "report" folder is created under the current path
    url = None  # tracking-event upload URL; must be set before use
    all_events = []  # names of all tracked events
    interface_url = []  # API endpoint fragments to intercept
    mock_json = {}  # mock JSON payload
    events_properties = {}  # per-event property configuration
| 24.424242 | 101 | 0.619107 |
51f48b69f5a97792ed83236058b2281a9436bf6c
| 1,221 |
py
|
Python
|
parsec/commands/workflows/extract_workflow_from_history.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 8 |
2015-03-27T17:09:15.000Z
|
2021-07-13T15:33:02.000Z
|
parsec/commands/workflows/extract_workflow_from_history.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 30 |
2015-02-27T21:21:47.000Z
|
2021-08-31T14:19:55.000Z
|
parsec/commands/workflows/extract_workflow_from_history.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 12 |
2017-06-01T03:49:23.000Z
|
2021-07-13T15:33:06.000Z
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
# Auto-generated Click wrapper around BioBlend's
# WorkflowClient.extract_workflow_from_history. The docstring below is the
# command's user-visible help text, so it is left unchanged.
@click.command('extract_workflow_from_history')
@click.argument("history_id", type=str)
@click.argument("workflow_name", type=str)
@click.option(
    "--job_ids",
    help="Optional list of job IDs to filter the jobs to extract from the history",
    type=str,
    multiple=True
)
@click.option(
    "--dataset_hids",
    help="Optional list of dataset hids corresponding to workflow inputs when extracting a workflow from history",
    type=str,
    multiple=True
)
@click.option(
    "--dataset_collection_hids",
    help="Optional list of dataset collection hids corresponding to workflow inputs when extracting a workflow from history",
    type=str,
    multiple=True
)
@pass_context
@custom_exception
@json_output
# NOTE(review): the "" defaults are never used in practice — Click passes a
# (possibly empty) tuple for options declared with multiple=True.
def cli(ctx, history_id, workflow_name, job_ids="", dataset_hids="", dataset_collection_hids=""):
    """Extract a workflow from a history.
    Output:
        A description of the created workflow
    """
    # Delegate straight to the BioBlend Galaxy client held on the context.
    return ctx.gi.workflows.extract_workflow_from_history(history_id, workflow_name, job_ids=job_ids, dataset_hids=dataset_hids, dataset_collection_hids=dataset_collection_hids)
| 32.131579 | 177 | 0.763309 |
0be3913cc496bbe3ff38ff949b6ab64e88c44e6c
| 219 |
py
|
Python
|
configs/train_classifier/td_chexpert/td_chexpert_bce_50.py
|
CAMP-eXplain-AI/imba-explain
|
e41b4ca5de63955cb0e925aad9599f38c5a3e973
|
[
"MIT"
] | null | null | null |
configs/train_classifier/td_chexpert/td_chexpert_bce_50.py
|
CAMP-eXplain-AI/imba-explain
|
e41b4ca5de63955cb0e925aad9599f38c5a3e973
|
[
"MIT"
] | null | null | null |
configs/train_classifier/td_chexpert/td_chexpert_bce_50.py
|
CAMP-eXplain-AI/imba-explain
|
e41b4ca5de63955cb0e925aad9599f38c5a3e973
|
[
"MIT"
] | null | null | null |
# Inherit the model, dataset, loss, and schedule settings from the shared
# base configuration files.
_base_ = [
    '../../_base_/classifiers/truncated_densenet.py',
    '../../_base_/datasets/chexpert_class_dataset.py',
    '../../_base_/losses/bce.py',
    '../../_base_/schedules/50e.py',
]

# CheXpert annotates 14 observation labels, one output per label.
classifier = {'num_classes': 14}
1d0e88e725777210dd8d92f72a7b3027e0ff6a17
| 1,993 |
py
|
Python
|
api/mako/models.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | null | null | null |
api/mako/models.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | 12 |
2020-02-11T23:12:23.000Z
|
2022-02-26T13:08:40.000Z
|
api/mako/models.py
|
erik-sn/mako
|
f64644dacb132b0063805ccafc3f485bdb62595b
|
[
"MIT"
] | 2 |
2018-08-10T08:14:31.000Z
|
2018-10-01T16:46:37.000Z
|
from django.db import models
from django.contrib.postgres.fields import ArrayField
# Recognized file extensions for uploads: data/text formats first, then
# source-code and executable formats.
# Fix: the original list contained '.txt' twice; the duplicate is removed.
FILE_EXTENSIONS = ['.txt', '.dat', '.db', '.log', '.mbd', '.sql', '.xml',
                   '.py', '.c', '.cpp', '.m', '.bat', '.bin', '.exe',
                   '.jar', '.wsf', '.cgi', '.apk', '.com', '.html', '.css',
                   '.rss', '.js', '.jsx', '.php', '.cs', '.java', '.h', '.o',
                   '.swift', '.sh', '.vb', '.class']
class ConfigManager(models.Manager):
    """Manager that always loads the singleton configuration row (pk=1)."""

    def load(self):
        """Return the single config instance, creating it on first access."""
        # get_or_create returns (obj, created); the created flag is unused.
        config, _ = self.get_or_create(pk=1)
        return config
class SiteConfig(models.Model):
    """Singleton model holding site-wide feature flags."""

    # When True, content marked unavailable is still shown.
    show_unavailable = models.BooleanField(default=False)
    # Debug switches: surface API 4xx/5xx errors instead of masking them.
    throw_api_400s = models.BooleanField(default=False)
    throw_api_500s = models.BooleanField(default=False)

    objects = ConfigManager()

    def __str__(self):
        return 'Mako general site configuration'

    def delete(self, *args, **kwargs):
        # Bug fix: the exception was previously *returned* instead of raised,
        # so deleting the configuration was not actually prevented.
        raise NotImplementedError('The site configuration cannot be deleted')

    def save(self, *args, **kwargs):
        # Pin pk=1 so only a single configuration row can ever exist.
        self.pk = 1
        super().save(*args, **kwargs)
class ImageConfig(models.Model):
    """Singleton model listing the accepted image upload formats."""

    # NOTE(review): Django recommends a callable for ArrayField defaults so
    # the list isn't shared between instances — confirm before changing, as
    # it would require a migration.
    valid_image_formats = ArrayField(models.CharField(max_length=10), default=['png', 'jpeg', 'jpg'])

    objects = ConfigManager()

    def __str__(self):
        return 'Mako images configuration'

    def delete(self, *args, **kwargs):
        # Bug fix: the exception was previously *returned* instead of raised,
        # so deleting the configuration was not actually prevented.
        raise NotImplementedError('The image configuration cannot be deleted')

    def save(self, *args, **kwargs):
        # Pin pk=1 so only a single configuration row can ever exist.
        self.pk = 1
        super().save(*args, **kwargs)
class FileConfig(models.Model):
    """Singleton model listing the accepted file upload formats."""

    # NOTE(review): FILE_EXTENSIONS is a shared mutable default — Django
    # recommends a callable; confirm before changing (requires a migration).
    valid_file_formats = ArrayField(models.CharField(max_length=10), default=FILE_EXTENSIONS)

    objects = ConfigManager()

    def __str__(self):
        return 'Mako file configuration'

    def delete(self, *args, **kwargs):
        # Bug fix: the exception was previously *returned* instead of raised,
        # so deleting the configuration was not actually prevented.
        raise NotImplementedError('The file configuration cannot be deleted')

    def save(self, *args, **kwargs):
        # Pin pk=1 so only a single configuration row can ever exist.
        self.pk = 1
        super().save(*args, **kwargs)
| 32.145161 | 101 | 0.60863 |
8338ca409c68b767a941bc11fed7f6a9d738fc72
| 36,085 |
py
|
Python
|
unfurl/unfurl/configurator.py
|
cybercur4/unfurl
|
39ba274ae50c64c176da5df45ca0cfe03eeb67a1
|
[
"MIT"
] | 1 |
2022-01-15T07:17:36.000Z
|
2022-01-15T07:17:36.000Z
|
unfurl/configurator.py
|
Md-Danish-eng/unfurl
|
a081657afbd1128e58f95d75ed8cdbdfff2dfe3f
|
[
"MIT"
] | null | null | null |
unfurl/configurator.py
|
Md-Danish-eng/unfurl
|
a081657afbd1128e58f95d75ed8cdbdfff2dfe3f
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
import six
from collections.abc import Mapping, MutableSequence
import os
import copy
from .support import Status, ResourceChanges, Priority, TopologyMap
from .result import (
serialize_value,
ChangeAware,
Results,
ResultsMap,
get_digest,
Result,
)
from .util import (
register_class,
validate_schema,
UnfurlTaskError,
UnfurlAddingResourceError,
filter_env,
to_enum,
wrap_sensitive_value,
sensitive,
)
from . import merge
from .eval import Ref, map_value, RefContext
from .runtime import RelationshipInstance, Operational
from .yamlloader import yaml
from .projectpaths import WorkFolder, Folders
from .planrequests import (
TaskRequest,
JobRequest,
ConfigurationSpec, # import used by unit tests
create_task_request,
find_operation_host,
create_instance_from_spec,
)
import logging
logger = logging.getLogger("unfurl.task")
class ConfiguratorResult:
    """
    Value object describing the outcome of a configurator operation.

    ``modified`` indicates whether the underlying state of configuration
    was changed, i.e. the operation physically altered the system this
    configuration represents.
    ``status`` reports the Status of the current configuration.
    """

    def __init__(
        self,
        success,
        modified,
        status=None,
        configChanged=None,
        result=None,
        outputs=None,
        exception=None,
    ):
        self.modified = modified
        self.status = to_enum(Status, status)
        self.configChanged = configChanged
        self.result = result
        self.success = success
        self.outputs = outputs
        # Bug fix: this was hard-wired to None, silently dropping the
        # exception passed in by TaskView.done().
        self.exception = exception

    def __str__(self):
        # Truncate potentially large results for readable log output.
        result = "" if self.result is None else str(self.result)[:240] + "..."
        return (
            "changes: "
            + (
                " ".join(
                    filter(
                        None,
                        [
                            self.success and "success",
                            self.modified and "modified",
                            self.status is not None and self.status.name,
                        ],
                    )
                )
                or "none"
            )
            + "\n "
            + result
        )
class AutoRegisterClass(type):
    """Metaclass that registers every new class under a short alias.

    The alias is ``short_name`` when set, otherwise the class name with a
    trailing "Configurator" stripped (falling back to the full class name).
    An empty alias suppresses registration.
    """

    def __new__(mcls, name, bases, dct):
        cls = type.__new__(mcls, name, bases, dct)
        if cls.short_name:
            alias = cls.short_name
        elif name.endswith("Configurator"):
            alias = name[: -len("Configurator")]
        else:
            alias = name
        if alias:
            register_class(f"{cls.__module__}.{cls.__name__}", cls, alias)
        return cls
@six.add_metaclass(AutoRegisterClass)
class Configurator:
    """Base class for configurators — the plugins that implement operations."""

    short_name = None
    """shortName can be used to customize the "short name" of the configurator
    as an alternative to using the full name ("module.class") when setting the implementation on an operation.
    (Titlecase recommended)"""

    # Input parameter names that save_digest()/check_digest() should ignore.
    exclude_from_digest = ()

    @classmethod
    def set_config_spec_args(klass, kw: dict, target):
        # Hook for subclasses to adjust the keyword arguments used to build a
        # ConfigurationSpec; the default returns them unchanged.
        return kw

    def __init__(self, configurationSpec):
        self.configSpec = configurationSpec

    def get_generator(self, task):
        # run() is a generator function, so calling it returns the generator.
        return self.run(task)

    def render(self, task):
        """
        This method is called during the planning phase to give the configurator an
        opportunity to do early validation and error detection and generate any plan information or configuration files that the user may want to review before running the deployment task.
        Property access and writes will be tracked and used to establish dynamic dependencies between instances so the plan can be ordered properly. Any updates made to instances maybe reverted if it has dependencies on attributes that might be changed later in the plan, so this method should be idempotent.
        Returns:
            The value returned here will subsequently be available as ``task.rendered``
        """
        return None

    # yields a JobRequest, TaskRequest or a ConfiguratorResult
    def run(self, task):
        """
        This should perform the operation specified in the :class:`ConfigurationSpec`
        on the :obj:`task.target`.
        Args:
            task (:class:`TaskView`) The task currently running.
        Yields:
            Should yield either a :class:`JobRequest`, :class:`TaskRequest`
            or a :class:`ConfiguratorResult` when done
        """
        yield task.done(False)

    def can_dry_run(self, task):
        """
        Returns whether this configurator can handle a dry run for the given task.
        (If so, it should check :attr:`.TaskView.dry_run` during run().)
        Args:
            task (:obj:`TaskView`) The task about to be run.
        Returns:
            bool
        """
        return False

    def can_run(self, task):
        """
        Return whether or not the configurator can execute the given task,
        depending on if this configurator supports the requested action and parameters
        and given the current state of the target instance.
        Args:
            task (:class:`TaskView`) The task that is about to be run.
        Returns:
            (bool or str): Should return True or a message describing why the task couldn't be run.
        """
        return True

    def should_run(self, task):
        """Does this configuration need to be run?"""
        return self.configSpec.should_run()

    def save_digest(self, task):
        """
        Generate a compact, deterministic representation of the current configuration.
        This is saved in the job log and used by `check_digest` in subsequent jobs to
        determine if the configuration changed and the operation needs to be re-run.
        The default implementation calculates a SHA1 digest of the values of the inputs
        that were accessed while the task was run, with the exception of
        the input parameters listed in `exclude_from_digest`.
        Args:
            task (:class:`TaskView`) The task that executed this operation.
        Returns:
            dict: A dictionary whose keys are strings that start with "digest"
        """
        # XXX user definition should be able to exclude inputs from digest
        inputs = task._resolved_inputs
        # sensitive values are always redacted so no point in including them in the digest
        # (for cleaner output and security-in-depth)
        keys = [
            k
            for k in inputs.keys()
            if k not in self.exclude_from_digest
            and not isinstance(inputs[k].resolved, sensitive)
        ]
        values = [inputs[key] for key in keys]
        # Dependencies recorded during the run also contribute to the digest.
        for dep in task.dependencies:
            if not isinstance(dep.expected, sensitive):
                keys.append(dep.expr)
                values.append(dep.expected)
        if keys:
            inputdigest = get_digest(values, manifest=task._manifest)
        else:
            inputdigest = ""
        digest = dict(digestKeys=",".join(keys), digestValue=inputdigest)
        task.logger.debug(
            "digest for %s: %s=%s", task.target.name, digest["digestKeys"], inputdigest
        )
        return digest

    def check_digest(self, task, changeset):
        """
        Examine the previous :class:`ChangeRecord` generated the last time this operation
        was performed on the target instance and return whether it should be rerun or not.
        The default implementation recalculates the digest of input parameters that
        were accessed in the previous run.
        Args:
            task (:class:`TaskView`) The task that might execute this operation.
            changeset (:class:`ChangeRecord`) The change record from the previous run.
        Returns:
            bool: True if the configuration's digest has changed, False if it is the same.
        """
        _parameters = getattr(changeset, "digestKeys", "")
        newKeys = {k for k in task.inputs.keys() if k not in self.exclude_from_digest}
        task.logger.debug("checking digest for %s: %s", task.target.name, _parameters)
        if not _parameters:
            return bool(newKeys)
        keys = _parameters.split(",")
        # Keys without "::" are plain input names; keys with "::" are expressions.
        oldInputs = {key for key in keys if "::" not in key}
        if oldInputs - newKeys:
            return True  # an old input was removed
        # only resolve the inputs and dependencies that were resolved before
        results = []
        for key in keys:
            if "::" in key:
                results.append(Ref(key).resolve(task.inputs.context, wantList="result"))
            else:
                results.append(task.inputs._getresult(key))
        newDigest = get_digest(results, manifest=task._manifest)
        mismatch = changeset.digestValue != newDigest
        if mismatch:
            task.logger.verbose(
                "digests didn't match for %s with %s: old %s, new %s",
                task.target.name,
                _parameters,
                changeset.digestValue,
                newDigest,
            )
        return mismatch
class _ConnectionsMap(dict):
    """Maps connection name -> relationship, with type-based fallback lookup."""

    def by_type(self):
        """Return one connection per relationship type.

        Values are scanned in reverse insertion order so that the nearest
        (earliest-inserted) relationship wins for each type.
        """
        unique = {}  # the list() below is for Python 3.7
        for rel in reversed(list(self.values())):
            unique[rel.type] = rel
        return unique.values()

    def __missing__(self, key):
        """Fall back to interpreting *key* as a relationship type name."""
        # The more specific connections were inserted first, so the first
        # match found is the most relevant connection of the given type.
        for candidate in self.values():
            if isinstance(candidate, Result):
                candidate = candidate.resolved
            matched = (
                candidate.template.is_compatible_type(key)
                # hackish: also match the local (unqualified) name of the type
                or key == candidate.type.rpartition(".")[2]
            )
            if matched:
                return candidate
        raise KeyError(key)
class TaskView:
    """The interface presented to configurators.
    The following public attributes are available:
    Attributes:
        target: The instance this task is operating on.
        reason (str): The reason this operation was planned. See :class:`~unfurl.support.Reason`
        cwd (str): Current working directory
        dry_run (bool): Dry run only
        verbose (int): Verbosity level set for this job (-1 error, 0 normal, 1 verbose, 2 debug)
    """

    def __init__(self, manifest, configSpec, target, reason=None, dependencies=None):
        # public:
        self.configSpec = configSpec
        self.target = target
        self.reason = reason
        self.logger = logger
        self.cwd = os.path.abspath(self.target.base_dir)
        self.rendered = None
        # private:
        self._errors = []  # UnfurlTaskError objects appends themselves to this list
        self._inputs = None
        self._environ = None
        self._manifest = manifest
        self.messages = []
        self._addedResources = []
        self._dependenciesChanged = False
        self.dependencies = dependencies or []
        self._resourceChanges = ResourceChanges()
        self._workFolders = {}
        # public:
        self.operation_host = find_operation_host(target, configSpec.operation_host)

    @property
    def inputs(self):
        """
        Exposes inputs and task settings as expression variables, so they can be accessed like:
        eval: $inputs::param
        or in jinja2 templates:
        {{ inputs.param }}
        """
        if self._inputs is None:
            assert self._attributeManager
            assert self.target.root.attributeManager is self._attributeManager
            # deepcopy because ResultsMap might modify interior maps and lists
            inputs = copy.deepcopy(self.configSpec.inputs)
            relationship = isinstance(self.target, RelationshipInstance)
            if relationship:
                target = self.target.target
            else:
                target = self.target
            HOST = (target.parent or target).attributes
            ORCHESTRATOR = target.root.find_instance_or_external("localhost")
            vars = dict(
                inputs=inputs,
                task=self.get_settings(),
                connections=self._get_connections(),
                TOPOLOGY=dict(
                    inputs=target.root._attributes["inputs"],
                    outputs=target.root._attributes["outputs"],
                ),
                NODES=TopologyMap(target.root),
                SELF=self.target.attributes,
                HOST=HOST,
                ORCHESTRATOR=ORCHESTRATOR and ORCHESTRATOR.attributes or {},
                OPERATION_HOST=self.operation_host
                and self.operation_host.attributes
                or {},
            )
            if relationship:
                vars["SOURCE"] = self.target.source.attributes
                vars["TARGET"] = target.attributes
            # expose inputs lazily to allow self-reference
            ctx = RefContext(self.target, vars, task=self)
            if self.configSpec.artifact and self.configSpec.artifact.base_dir:
                ctx.base_dir = self.configSpec.artifact.base_dir
            self._inputs = ResultsMap(inputs, ctx)
        return self._inputs

    @property
    def vars(self):
        """
        A dictionary of the same variables that are available to expressions when evaluating inputs.
        """
        return self.inputs.context.vars

    @staticmethod
    def _get_connection(source, target, seen):
        """
        Find the requirements on source that match the target
        If source is root, requirements will be the connections that are the default_for the target.
        """
        if source is target:
            return None
        for rel in source.get_requirements(target):
            if id(rel) not in seen:
                seen[id(rel)] = rel

    def _get_connections(self):
        """
        Build a dictionary of connections by looking for instances that the task's implementation
        might want to connect to (transitively following the target's hostedOn relationship)
        and adding any connections (relationships) that the operation_host has with those instances.
        Then add any default connections, prioritizing default connections to those instances.
        (Connections that explicitly set a ``default_for`` key that matches those instances.)
        """
        seen = {}
        for parent in self.target.ancestors:
            if parent is self.target.root:
                break
            if self.operation_host:
                self._get_connection(self.operation_host, parent, seen)
            self._get_connection(self.target.root, parent, seen)
        # get the rest of the default connections
        self._get_connection(self.target.root, None, seen)
        # reverse so nearest relationships replace less specific ones that have matching names
        connections = _ConnectionsMap(  # the list() is for Python 3.7
            (rel.name, rel) for rel in reversed(list(seen.values()))
        )
        return connections

    def _find_relationship_env_vars(self):
        """
        Collect any environment variables set by the connections returned by ``_get_connections()``.
        Motivating example:
        Consider an operation whose target is a Kubernetes cluster hosted on GCP.
        The operation_host's connections to those instances might set KUBECONFIG and GOOGLE_APPLICATION_CREDENTIALS
        respectively and the operation's implementation will probably need both those set when it executes.
        """
        env = {}
        t = lambda datatype: datatype.type == "unfurl.datatypes.EnvVar"
        for rel in self._get_connections().by_type():  # only one per connection type
            env.update(rel.merge_props(t, True))
        return env

    def get_environment(self, addOnly):
        """Return a dictionary of environment variables applicable to this task.
        Args:
            addOnly (bool): If addOnly is False all variables in the current os environment will be included
            otherwise only variables added will be included.
        Returns:
            :dict:
        Variable sources (by order of preference, lowest to highest):
        1. The ensemble's environment
        2. Variables set by the connections that are available to this operation.
        3. Variables declared in the operation's ``environment`` section.
        """
        env = os.environ.copy()
        # build rules by order of preference (last overrides first):
        # 1. ensemble's environment
        # 2. variables set by connections
        # 3. operation's environment
        # we use merge.copy() to preserve basedir
        rules = merge.copy(self.target.root.envRules)
        rules.update(self._find_relationship_env_vars())
        if self.configSpec.environment:
            rules.update(self.configSpec.environment)
        # apply rules
        rules = serialize_value(
            map_value(rules, self.inputs.context), resolveExternal=True
        )
        env = filter_env(rules, env, addOnly=addOnly)
        # add the variables required by TOSCA 1.3 spec
        targets = []
        if isinstance(self.target, RelationshipInstance):
            targets = [
                c.tosca_id
                for c in self.target.target.get_capabilities(
                    self.target.capability.template.name
                )
            ]
            env.update(
                dict(
                    TARGETS=",".join(targets),
                    TARGET=self.target.target.tosca_id,
                    SOURCES=",".join(
                        [
                            r.tosca_id
                            for r in self.target.source.get_requirements(
                                self.target.requirement.template.name
                            )
                        ]
                    ),
                    SOURCE=self.target.source.tosca_id,
                )
            )
        return env

    def get_settings(self):
        """Return a snapshot of this task's settings as a plain dict."""
        return dict(
            verbose=self.verbose,
            name=self.configSpec.name,
            dryrun=self.dry_run,
            workflow=self.configSpec.workflow,
            operation=self.configSpec.operation,
            timeout=self.configSpec.timeout,
            target=self.target.name,
            reason=self.reason,
            cwd=self.cwd,
        )

    def find_connection(self, target, relation="tosca.relationships.ConnectsTo"):
        """Find a relationship from the operation host (or a default) to *target*."""
        connection = self.query(
            f"$OPERATION_HOST::.requirements::*[.type={relation}][.target=$target]",
            vars=dict(target=target),
        )
        # alternative query: [.type=unfurl.nodes.K8sCluster]::.capabilities::.relationships::[.type=unfurl.relationships.ConnectsTo.K8sCluster][.source=$OPERATION_HOST]
        if not connection:
            # no connection, see if there's a default relationship template defined for this target
            endpoints = target.get_default_relationships(relation)
            if endpoints:
                connection = endpoints[0]
        return connection

    def sensitive(self, value):
        """Mark the given value as sensitive. Sensitive values will be encrypted or redacted when outputed.
        Returns:
          sensitive: A copy of the value converted the appropriate subtype of :class:`unfurl.logs.sensitive` value or the value itself if it can't be converted.
        """
        return wrap_sensitive_value(value)

    def add_message(self, message):
        self.messages.append(message)

    def find_instance(self, name):
        return self._manifest.get_root_resource().find_instance_or_external(name)

    # XXX
    # def pending(self, modified=None, sleep=100, waitFor=None, outputs=None):
    #     """
    #     >>> yield task.pending(60)
    #
    #     set modified to True to advise that target has already been modified
    #
    #     outputs to share operation outputs so far
    #     """

    def done(
        self,
        success=None,
        modified=None,
        status=None,
        result=None,
        outputs=None,
        captureException=None,
    ):
        """`run()` should call this method and yield its return value before terminating.
        >>> yield task.done(True)
        Args:
          success (bool):  indicates if this operation completed without an error.
          modified (bool): (optional) indicates whether the physical instance was modified by this operation.
          status (Status): (optional) should be set if the operation changed the operational status of the target instance.
                   If not specified, the runtime will updated the instance status as needed, based
                   the operation preformed and observed changes to the instance (attributes changed).
          result (dict): (optional) A dictionary that will be serialized as YAML into the changelog, can contain any useful data about these operation.
          outputs (dict): (optional) Operation outputs, as specified in the topology template.
        Returns:
              :class:`ConfiguratorResult`
        """
        if success is None:
            success = not self._errors
        if isinstance(modified, Status):
            # convenience: allow done(modified=<Status>) as shorthand
            status = modified
            modified = True
        kw = dict(result=result, outputs=outputs)
        if captureException is not None:
            logLevel = logging.DEBUG if success else logging.ERROR
            kw["exception"] = UnfurlTaskError(self, captureException, logLevel)
        return ConfiguratorResult(success, modified, status, **kw)

    # updates can be marked as dependencies (changes to dependencies changed) or required (error if changed)
    # configuration has cumulative set of changes made it to resources
    # updates update those changes
    # other configurations maybe modify those changes, triggering a configuration change
    def query(
        self,
        query,
        dependency=False,
        name=None,
        required=False,
        wantList=False,
        resolveExternal=True,
        strict=True,
        vars=None,
        throw=False,
    ):
        """Evaluate an expression; optionally record it as a dependency."""
        # XXX pass resolveExternal to context?
        try:
            result = Ref(query, vars=vars).resolve(
                self.inputs.context, wantList, strict
            )
        except Exception:
            # Fix: was a bare "except:", which also swallowed KeyboardInterrupt
            # and SystemExit when throw was False.
            if not throw:
                UnfurlTaskError(
                    self, f"error while evaluating query: {query}", logging.WARNING
                )
                return None
            raise
        if dependency:
            self.add_dependency(
                query, result, name=name, required=required, wantList=wantList
            )
        return result

    def add_dependency(
        self,
        expr,
        expected=None,
        schema=None,
        name=None,
        required=True,
        wantList=False,
        target=None,
    ):
        """Record (or replace) a runtime dependency for this configuration."""
        getter = getattr(expr, "as_ref", None)
        if getter:
            # expr is a configuration or resource or ExternalValue
            expr = Ref(getter()).source
        dependency = Dependency(
            expr, expected, schema, name, required, wantList, target
        )
        for i, dep in enumerate(self.dependencies):
            if dep.expr == expr or dep.name == name:
                self.dependencies[i] = dependency
                break
        else:
            self.dependencies.append(dependency)
        self._dependenciesChanged = True
        return dependency

    def remove_dependency(self, name):
        """Remove and return the dependency with the given name, or None."""
        for i, dep in enumerate(self.dependencies):
            if dep.name == name:
                self.dependencies.pop(i)
                self._dependenciesChanged = True
                return dep
        return None

    # def createConfigurationSpec(self, name, configSpec):
    #     if isinstance(configSpec, six.string_types):
    #         configSpec = yaml.load(configSpec)
    #     return self._manifest.loadConfigSpec(name, configSpec)

    def create_sub_task(
        self, operation=None, resource=None, inputs=None, persist=False, required=None
    ):
        """Create a subtask that will be executed if yielded by `run()`
        Args:
          operation (str): The operation call (like ``interface.operation``)
          resource (:class:`NodeInstance`) The current target if missing.
        Returns:
           :class:`TaskRequest`
        """
        if resource is None:
            resource = self.target
        if inputs is None:
            inputs = self.configSpec.inputs
        if not operation:
            operation = f"{self.configSpec.interface}.{self.configSpec.operation}"
        if isinstance(operation, six.string_types):
            taskRequest = create_task_request(
                self.job.jobOptions,
                operation,
                resource,
                "subtask: " + self.configSpec.name,
                inputs,
                # filter has matched this parent task, don't apply it again
                skip_filter=True,
            )
            if not taskRequest or taskRequest.error:
                return None
            else:
                taskRequest.persist = persist
                taskRequest.required = required
                return taskRequest

        # XXX:
        # # Configurations created by subtasks are transient insofar as the are not part of the spec,
        # # but they are recorded as part of the resource's configuration state.
        # # Marking as persistent or required will create a dependency on the new configuration.
        # if persist or required:
        #  expr = "::%s::.configurations::%s" % (configSpec.target, configSpec.name)
        #  self.add_dependency(expr, required=required)

        # otherwise operation should be a ConfigurationSpec
        return TaskRequest(operation, resource, "subtask", persist, required)

    def _update_instance(self, existingResource, resourceSpec):
        """Apply a resource-spec dict to an existing instance; return True if changed."""
        from .manifest import Manifest

        updated = False
        # XXX2 if spec is defined (not just status), there should be a way to
        # indicate this should replace an existing resource or throw an error
        if "readyState" in resourceSpec:
            # we need to set this explicitly for the attribute manager to track status
            # XXX track all status attributes (esp. state and created) and remove this hack
            operational = Manifest.load_status(resourceSpec)
            if operational.local_status is not None:
                existingResource.local_status = operational.local_status
            if operational.state is not None:
                existingResource.state = operational.state
            updated = True

        attributes = resourceSpec.get("attributes")
        if attributes:
            for key, value in map_value(attributes, existingResource).items():
                existingResource.attributes[key] = value
                self.logger.debug(
                    "setting attribute %s with %s on %s",
                    key,
                    value,
                    existingResource.name,
                )
            updated = True
        return updated

    def _parse_instances_tpl(self, instances):
        """Normalize an instances argument (YAML string or mapping) to a list."""
        if isinstance(instances, six.string_types):
            try:
                instances = yaml.load(instances)
            except Exception:
                # Fix: was a bare "except:" that also caught BaseException.
                err = UnfurlTaskError(self, f"unable to parse as YAML: {instances}")
                return None, err

        if isinstance(instances, Mapping):
            instances = [instances]
        elif not isinstance(instances, MutableSequence):
            err = UnfurlTaskError(
                self,
                f"update_instances requires a list of updates, not a {type(instances)}",
            )
            return None, err
        return instances, None

    # # XXX how can we explicitly associate relations with target resources etc.?
    # # through capability attributes and dependencies/relationship attributes
    def update_instances(self, instances):
        """Notify Unfurl of new or changes to instances made while the configurator was running.
        Operational status indicates if the instance currently exists or not.
        This will queue a new child job if needed.
        .. code-block:: YAML
          - name:     aNewInstance
            template: aNodeTemplate
            parent:   HOST
            attributes:
               anAttribute: aValue
            readyState:
              local: ok
              state: state
          - name:     SELF
            attributes:
                anAttribute: aNewValue
        Args:
          instances (list or str): Either a list or string that is parsed as YAML.
        Returns:
          :class:`JobRequest`: To run the job based on the supplied spec
              immediately, yield the returned JobRequest.
        """
        instances, err = self._parse_instances_tpl(instances)
        if err:
            return None, [err]

        errors = []
        newResources = []
        newResourceSpecs = []
        for resourceSpec in instances:
            # we might have items that aren't resource specs
            if not isinstance(resourceSpec, Mapping):
                continue
            # XXX deepcopy fails in test_terraform
            # originalResourceSpec = copy.deepcopy(resourceSpec)
            originalResourceSpec = copy.copy(resourceSpec)
            rname = resourceSpec.get("name", "SELF")
            if rname == ".self" or rname == "SELF":
                existingResource = self.target
                rname = existingResource.name
            else:
                existingResource = self.find_instance(rname)
            newResource = None
            try:
                if existingResource:
                    updated = self._update_instance(existingResource, resourceSpec)
                    if updated:
                        self.logger.info("updating instances %s", existingResource.name)
                else:
                    newResource = create_instance_from_spec(
                        self._manifest, self.target, rname, resourceSpec
                    )
                # XXX
                # if resource.required or resourceSpec.get("dependent"):
                #    self.add_dependency(resource, required=resource.required)
            except Exception:
                # Fix: was a bare "except:" that also caught BaseException.
                errors.append(
                    UnfurlAddingResourceError(self, originalResourceSpec, rname)
                )
            else:
                if newResource:
                    newResourceSpecs.append(originalResourceSpec)
                    newResources.append(newResource)

        if newResourceSpecs:
            self._resourceChanges.add_resources(newResourceSpecs)
            self._addedResources.extend(newResources)
            self.logger.info("add resources %s", newResources)

            jobRequest = JobRequest(newResources, errors)
            if self.job:
                self.job.jobRequestQueue.append(jobRequest)
            return jobRequest, errors
        return None, errors

    def set_work_folder(self, location="operation", preserve=None) -> WorkFolder:
        """Return (creating if needed) the WorkFolder for the given location."""
        if location in self._workFolders:
            return self._workFolders[location]
        if preserve is None:
            preserve = True if location in Folders.Persistent else False
        wf = WorkFolder(self, location, preserve)
        self._workFolders[location] = wf
        return wf
        # XXX multiple tasks can be accessing the same workfolder, so:
        # return self.job.setFolder(
        #     self, location, preserve
        # )

    def get_work_folder(self, location=None):
        # return self.job.getFolder(self, location)
        if location is None:
            # XXX error if there is more than one?
            return next(iter(self._workFolders.values()))
        else:
            return self._workFolders[location]

    def discard_work_folders(self):
        while self._workFolders:
            _, wf = self._workFolders.popitem()
            wf.discard()

    def fail_work_folders(self):
        while self._workFolders:
            _, wf = self._workFolders.popitem()
            wf.failed()

    def apply_work_folders(self, *names):
        if not names:  # no args were passed, apply them all
            names = self._workFolders.keys()
        for name in names:
            wf = self._workFolders.get(name)
            if wf:
                wf.apply()
class Dependency(Operational):
    """Represents a runtime dependency for a configuration.
    Dependencies are used to determine if a configuration needs re-run.
    They are automatically created when configurator accesses live attributes
    while handling a task. They also can be created when the configurator
    invokes these apis: `create_sub_task`, `update_instances`, `query`, `add_dependency`.
    """

    def __init__(
        self,
        expr,
        expected=None,
        schema=None,
        name=None,
        required=False,
        wantList=False,
        target=None,
    ):
        """
        if schema is not None, validate the result using schema
        if expected is not None, test that result equals expected
        otherwise test that result isn't empty and has not changed since the last attempt
        """
        assert not (expected and schema)
        self.expr = expr
        self.expected = expected
        self.schema = schema
        self._required = required
        self.name = name or expr
        self.wantList = wantList
        self.target = target

    @property
    def local_status(self):
        if self.target and self.target is not self.target.root:
            return self.target.status
        else:  # the root has inputs which don't have operational status
            return Status.ok

    @property
    def priority(self):
        return Priority.required if self._required else Priority.optional

    def refresh(self, config):
        """Re-evaluate the expression and update the expected value and target."""
        if self.expected is not None:
            changeId = config.changeId
            context = RefContext(
                config.target, dict(val=self.expected, changeId=changeId)
            )
            result = Ref(self.expr).resolve(context, wantList=self.wantList)
            self.target = context._lastResource
            self.expected = result

    @staticmethod
    def has_value_changed(value, changeset):
        """Recursively test whether *value* (or anything it contains) changed
        since *changeset*. Always returns a bool (the original implementation
        fell through and returned None on unchanged containers)."""
        if isinstance(value, Results):
            return Dependency.has_value_changed(value._attributes, changeset)
        elif isinstance(value, Mapping):
            return any(
                Dependency.has_value_changed(v, changeset) for v in value.values()
            )
        elif isinstance(value, (MutableSequence, tuple)):
            return any(Dependency.has_value_changed(v, changeset) for v in value)
        elif isinstance(value, ChangeAware):
            return value.has_changed(changeset)
        else:
            return False

    def has_changed(self, config):
        changeId = config.changeId
        context = RefContext(config.target, dict(val=self.expected, changeId=changeId))
        result = Ref(self.expr).resolve_one(context)  # resolve(context, self.wantList)
        if self.schema:
            # result isn't as expected, something changed
            if not validate_schema(result, self.schema):
                return False
        else:
            if self.expected is not None:
                expected = map_value(self.expected, context)
                if result != expected:
                    logger.debug("has_changed: %s != %s", result, expected)
                    return True
            elif not result:
                # if expression no longer true (e.g. a resource wasn't found), then treat dependency as changed
                return True
        if self.has_value_changed(result, config):
            return True
        return False
| 36.746436 | 308 | 0.60291 |
92a4ed9d1b02b073a82d4945184f91a19b59ee57
| 571 |
py
|
Python
|
main.py
|
Rajmeet/Image-to-Speech
|
b91d37c9133711818f167971f0e690557c83e2e2
|
[
"MIT"
] | null | null | null |
main.py
|
Rajmeet/Image-to-Speech
|
b91d37c9133711818f167971f0e690557c83e2e2
|
[
"MIT"
] | null | null | null |
main.py
|
Rajmeet/Image-to-Speech
|
b91d37c9133711818f167971f0e690557c83e2e2
|
[
"MIT"
] | null | null | null |
# **** IMPORTING **** #
import cv2
import os
from gtts import gTTS
from PIL import Image
import pytesseract
# Absolute path to the Tesseract OCR binary -- Windows-specific default
# install location; adjust for other machines/platforms.
pytesseract.pytesseract.tesseract_cmd = (r"C:\Program Files\Tesseract-OCR\tesseract.exe")
# **** CONVERSION **** #
def ImageConversionToText(file):
    """OCR the image at *file*, print the text, and speak it aloud.

    Reads the image with OpenCV, extracts its text with Tesseract, prints
    it, then synthesizes speech with gTTS, saves it to ``audio.mp3`` and
    opens it with the system's default player.

    Raises:
        FileNotFoundError: if *file* does not exist or cannot be decoded.
        ValueError: if no text could be recognized in the image.
    """
    img = cv2.imread(file)
    # cv2.imread returns None instead of raising on a bad path -- fail with a
    # clear error rather than passing None into pytesseract.
    if img is None:
        raise FileNotFoundError(f"Could not read image: {file}")
    text = pytesseract.image_to_string(img)
    print(text)
    # gTTS fails with an unhelpful error on empty input; report it clearly.
    if not text.strip():
        raise ValueError(f"No text recognized in image: {file}")
    gtts = gTTS(text)
    gtts.save('audio.mp3')
    # os.startfile is the native Windows way to open a file with its default
    # app (this script already hard-codes a Windows tesseract path); fall back
    # to the original os.system call elsewhere.
    if hasattr(os, 'startfile'):
        os.startfile('audio.mp3')
    else:
        os.system('audio.mp3')
# **** MAIN **** #
if __name__ == "__main__":
    # Guard the prompt so importing this module does not block on input().
    file = input("Image name: (Needs to be in the same directory): ") + '.png'
    ImageConversionToText(file)
| 21.961538 | 90 | 0.646235 |
fbbed3a6b82ed01347855b5bca7de426e9f43e08
| 45,919 |
py
|
Python
|
sphinx/domains/std.py
|
eamanu/sphinx
|
8f184e07bbc47228e18fa63e6746bc894816fd88
|
[
"BSD-2-Clause"
] | null | null | null |
sphinx/domains/std.py
|
eamanu/sphinx
|
8f184e07bbc47228e18fa63e6746bc894816fd88
|
[
"BSD-2-Clause"
] | 7 |
2021-03-30T14:10:56.000Z
|
2022-03-12T00:43:13.000Z
|
virtual/lib/python3.6/site-packages/sphinx/domains/std.py
|
Mercy-Njoroge/blog
|
404336fb0fc8d172ddde8b744042cb3f37d89c65
|
[
"MIT"
] | null | null | null |
"""
sphinx.domains.std
~~~~~~~~~~~~~~~~~~
The standard domain.
:copyright: Copyright 2007-2020 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import unicodedata
import warnings
from copy import copy
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
from typing import cast
from docutils import nodes
from docutils.nodes import Element, Node, system_message
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref
from sphinx.deprecation import RemovedInSphinx40Warning, RemovedInSphinx50Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import ws_re, logging, docname_join
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import clean_astext, make_id, make_refnode
from sphinx.util.typing import RoleFunction
if False:
# For type annotation
from typing import Type # for python3.5.1
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.environment import BuildEnvironment
logger = logging.getLogger(__name__)
# RE for option descriptions: group 1 is the option name (optionally prefixed
# with "/", "--", "-" or "+"), group 2 is the remainder (arguments)
option_desc_re = re.compile(r'((?:/|--|-|\+)?[^\s=[]+)(=?\s*.*)')
# RE for grammar tokens written as `name` inside productionlist rules
token_re = re.compile(r'`(\w+)`', re.U)
class GenericObject(ObjectDescription):
    """
    A generic x-ref directive registered with Sphinx.add_object_type().
    """
    indextemplate = ''
    parse_node = None # type: Callable[[GenericObject, BuildEnvironment, str, desc_signature], str] # NOQA
    def handle_signature(self, sig: str, signode: desc_signature) -> str:
        # Either delegate to the user-supplied parse_node callback or render
        # the signature verbatim as the object name.
        if self.parse_node:
            name = self.parse_node(self.env, sig, signode)
        else:
            signode.clear()
            signode += addnodes.desc_name(sig, sig)
            # normalize whitespace like XRefRole does
            name = ws_re.sub(' ', sig)
        return name
    def add_target_and_index(self, name: str, sig: str, signode: desc_signature) -> None:
        # Register a stable node id for the object and add an index entry
        # derived from indextemplate.
        node_id = make_id(self.env, self.state.document, self.objtype, name)
        signode['ids'].append(node_id)
        # Assign old styled node_id not to break old hyperlinks (if possible)
        # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
        old_node_id = self.make_old_id(name)
        if old_node_id not in self.state.document.ids and old_node_id not in signode['ids']:
            signode['ids'].append(old_node_id)
        self.state.document.note_explicit_target(signode)
        if self.indextemplate:
            colon = self.indextemplate.find(':')
            if colon != -1:
                # text before the colon selects the index entry type,
                # e.g. "pair: %s; statement"
                indextype = self.indextemplate[:colon].strip()
                indexentry = self.indextemplate[colon + 1:].strip() % (name,)
            else:
                indextype = 'single'
                indexentry = self.indextemplate % (name,)
            self.indexnode['entries'].append((indextype, indexentry, node_id, '', None))
        std = cast(StandardDomain, self.env.get_domain('std'))
        std.note_object(self.objtype, name, node_id, location=signode)
    def make_old_id(self, name: str) -> str:
        """Generate old styled node_id for generic objects.
        .. note:: Old Styled node_id was used until Sphinx-3.0.
                  This will be removed in Sphinx-5.0.
        """
        return self.objtype + '-' + name
class EnvVar(GenericObject):
    """Directive describing an environment variable (``.. envvar::``)."""
    indextemplate = _('environment variable; %s')
class EnvVarXRefRole(XRefRole):
    """
    Cross-referencing role for environment variables (adds an index entry).
    """
    def result_nodes(self, document: nodes.document, env: "BuildEnvironment", node: Element,
                     is_ref: bool) -> Tuple[List[Node], List[system_message]]:
        """Prepend an index entry and target in front of the reference node."""
        # Non-reference uses get no index entry -- pass the node through.
        if not is_ref:
            return [node], []
        varname = node['reftarget']
        target_id = 'index-%s' % env.new_serialno('index')
        # Index the variable both under its own name and under the
        # "environment variable" grouping.
        index = addnodes.index()
        index['entries'] = [
            ('single', varname, target_id, '', None),
            ('single', _('environment variable; %s') % varname, target_id, '', None),
        ]
        target = nodes.target('', '', ids=[target_id])
        document.note_explicit_target(target)
        return [index, target, node], []
class Target(SphinxDirective):
    """
    Generic target for user-defined cross-reference types.
    """
    indextemplate = ''
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {} # type: Dict
    def run(self) -> List[Node]:
        # Create the target node, register its ids, optionally add an index
        # entry, and record the target in the std domain.
        # normalize whitespace in fullname like XRefRole does
        fullname = ws_re.sub(' ', self.arguments[0].strip())
        node_id = make_id(self.env, self.state.document, self.name, fullname)
        node = nodes.target('', '', ids=[node_id])
        self.set_source_info(node)
        # Assign old styled node_id not to break old hyperlinks (if possible)
        # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
        old_node_id = self.make_old_id(fullname)
        if old_node_id not in self.state.document.ids and old_node_id not in node['ids']:
            node['ids'].append(old_node_id)
        self.state.document.note_explicit_target(node)
        ret = [node]  # type: List[Node]
        if self.indextemplate:
            # The rendered template may carry an index entry type before a
            # colon (e.g. "pair: ..."); default to a 'single' entry.
            indexentry = self.indextemplate % (fullname,)
            indextype = 'single'
            colon = indexentry.find(':')
            if colon != -1:
                indextype = indexentry[:colon].strip()
                indexentry = indexentry[colon + 1:].strip()
            inode = addnodes.index(entries=[(indextype, indexentry, node_id, '', None)])
            ret.insert(0, inode)
        # self.name may be prefixed with the domain ("std:foo") -- strip it
        name = self.name
        if ':' in self.name:
            _, name = self.name.split(':', 1)
        std = cast(StandardDomain, self.env.get_domain('std'))
        std.note_object(name, fullname, node_id, location=node)
        return ret
    def make_old_id(self, name: str) -> str:
        """Generate old styled node_id for targets.
        .. note:: Old Styled node_id was used until Sphinx-3.0.
                  This will be removed in Sphinx-5.0.
        """
        return self.name + '-' + name
class Cmdoption(ObjectDescription):
    """
    Description of a command-line option (.. option).
    """
    def handle_signature(self, sig: str, signode: desc_signature) -> str:
        """Transform an option description into RST nodes."""
        count = 0
        firstname = ''
        # One signature may list several synonymous options ("-f, --force").
        for potential_option in sig.split(', '):
            potential_option = potential_option.strip()
            m = option_desc_re.match(potential_option)
            if not m:
                logger.warning(__('Malformed option description %r, should '
                                  'look like "opt", "-opt args", "--opt args", '
                                  '"/opt args" or "+opt args"'), potential_option,
                               location=signode)
                continue
            optname, args = m.groups()
            if count:
                # visually separate synonyms
                signode += addnodes.desc_addname(', ', ', ')
            signode += addnodes.desc_name(optname, optname)
            signode += addnodes.desc_addname(args, args)
            if not count:
                firstname = optname
                signode['allnames'] = [optname]
            else:
                signode['allnames'].append(optname)
            count += 1
        if not firstname:
            # nothing parsable at all in the signature
            raise ValueError
        return firstname
    def add_target_and_index(self, firstname: str, sig: str, signode: desc_signature) -> None:
        # Register node ids and program-option entries for every synonym
        # collected by handle_signature, plus an index entry.
        currprogram = self.env.ref_context.get('std:program')
        for optname in signode.get('allnames', []):
            prefixes = ['cmdoption']
            if currprogram:
                prefixes.append(currprogram)
            if not optname.startswith(('-', '/')):
                # positional arguments get an extra "arg" prefix
                prefixes.append('arg')
            prefix = '-'.join(prefixes)
            node_id = make_id(self.env, self.state.document, prefix, optname)
            signode['ids'].append(node_id)
        self.state.document.note_explicit_target(signode)
        domain = cast(StandardDomain, self.env.get_domain('std'))
        for optname in signode.get('allnames', []):
            domain.add_program_option(currprogram, optname,
                                      self.env.docname, signode['ids'][0])
        # create an index entry
        if currprogram:
            descr = _('%s command line option') % currprogram
        else:
            descr = _('command line option')
        for option in sig.split(', '):
            entry = '; '.join([descr, option])
            self.indexnode['entries'].append(('pair', entry, signode['ids'][0], '', None))
class Program(SphinxDirective):
    """
    Directive to name the program for which options are documented.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {} # type: Dict
    def run(self) -> List[Node]:
        """Record (or clear) the active program in the build's ref context."""
        # Collapse whitespace so "git commit" is stored as "git-commit".
        name = ws_re.sub('-', self.arguments[0].strip())
        context = self.env.ref_context
        if name == 'None':
            # ``.. program:: None`` resets the current program context.
            context.pop('std:program', None)
        else:
            context['std:program'] = name
        return []
class OptionXRefRole(XRefRole):
    """Cross-reference role for program options (``:option:``)."""
    def process_link(self, env: "BuildEnvironment", refnode: Element, has_explicit_title: bool,
                     title: str, target: str) -> Tuple[str, str]:
        # Remember the program active at the reference site so the resolver
        # can look the option up under the right program later.
        refnode['std:program'] = env.ref_context.get('std:program')
        return title, target
def split_term_classifiers(line: str) -> List[Optional[str]]:
    """Split a glossary line into the term and its classifiers.

    Classifiers are separated from the term by " : ".  A trailing ``None``
    sentinel is always appended so callers can safely index one past the
    last classifier.
    """
    fields = re.split(' +: +', line)  # type: List[Optional[str]]
    fields.append(None)
    return fields
def make_glossary_term(env: "BuildEnvironment", textnodes: Iterable[Node], index_key: str,
                       source: str, lineno: int, node_id: str = None,
                       document: nodes.document = None) -> nodes.term:
    """Build a glossary ``term`` node and register it as an xref target.

    Also appends an index entry for the term (grouped under *index_key*
    when given).  *node_id* may be supplied externally (i18n); otherwise it
    is generated from the term text.
    """
    # get a text-only representation of the term and register it
    # as a cross-reference target
    term = nodes.term('', '', *textnodes)
    term.source = source
    term.line = lineno
    termtext = term.astext()
    if node_id:
        # node_id is given from outside (mainly i18n module), use it forcedly
        term['ids'].append(node_id)
    elif document:
        node_id = make_id(env, document, 'term', termtext)
        term['ids'].append(node_id)
        document.note_explicit_target(term)
    else:
        # deprecated code path: generate an id without a document
        warnings.warn('make_glossary_term() expects document is passed as an argument.',
                      RemovedInSphinx40Warning, stacklevel=2)
        gloss_entries = env.temp_data.setdefault('gloss_entries', set())
        node_id = nodes.make_id('term-' + termtext)
        if node_id == 'term':
            # "term" is not good for node_id. Generate it by sequence number instead.
            node_id = 'term-%d' % env.new_serialno('glossary')
        while node_id in gloss_entries:
            node_id = 'term-%d' % env.new_serialno('glossary')
        gloss_entries.add(node_id)
        term['ids'].append(node_id)
    std = cast(StandardDomain, env.get_domain('std'))
    std.note_object('term', termtext, node_id, location=term)
    # add an index entry too
    indexnode = addnodes.index()
    indexnode['entries'] = [('single', termtext, node_id, 'main', index_key)]
    indexnode.source, indexnode.line = term.source, term.line
    term.append(indexnode)
    return term
class Glossary(SphinxDirective):
    """
    Directive to create a glossary with cross-reference targets for :term:
    roles.
    """
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'sorted': directives.flag,
    }
    def run(self) -> List[Node]:
        """Parse the glossary body and build a definition-list node."""
        node = addnodes.glossary()
        node.document = self.state.document
        # This directive implements a custom format of the reST definition list
        # that allows multiple lines of terms before the definition. This is
        # easy to parse since we know that the contents of the glossary *must
        # be* a definition list.
        # first, collect single entries
        entries = []  # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
        in_definition = True
        in_comment = False
        was_empty = True
        messages = []  # type: List[Node]
        for line, (source, lineno) in zip(self.content, self.content.items):
            # empty line -> add to last definition
            if not line:
                if in_definition and entries:
                    entries[-1][1].append('', source, lineno)
                was_empty = True
                continue
            # unindented line -> a term
            if line and not line[0].isspace():
                # enable comments
                if line.startswith('.. '):
                    in_comment = True
                    continue
                else:
                    in_comment = False
                # first term of definition
                if in_definition:
                    if not was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary term must be preceded by empty line'),
                            source=source, line=lineno))
                    entries.append(([(line, source, lineno)], StringList()))
                    in_definition = False
                # second term and following
                else:
                    if was_empty:
                        messages.append(self.state.reporter.warning(
                            _('glossary terms must not be separated by empty lines'),
                            source=source, line=lineno))
                    if entries:
                        entries[-1][0].append((line, source, lineno))
                    else:
                        messages.append(self.state.reporter.warning(
                            _('glossary seems to be misformatted, check indentation'),
                            source=source, line=lineno))
            elif in_comment:
                pass
            else:
                # indented line -> part of the current definition body
                if not in_definition:
                    # first line of definition, determines indentation
                    in_definition = True
                    indent_len = len(line) - len(line.lstrip())
                if entries:
                    entries[-1][1].append(line[indent_len:], source, lineno)
                else:
                    messages.append(self.state.reporter.warning(
                        _('glossary seems to be misformatted, check indentation'),
                        source=source, line=lineno))
            was_empty = False
        # now, parse all the entries into a big definition list
        items = []
        for terms, definition in entries:
            termtexts = []          # type: List[str]
            termnodes = []          # type: List[Node]
            system_messages = []    # type: List[Node]
            for line, source, lineno in terms:
                parts = split_term_classifiers(line)
                # parse the term with inline markup
                # classifiers (parts[1:]) will not be shown on doctree
                textnodes, sysmsg = self.state.inline_text(parts[0], lineno)
                # use first classifier as a index key
                term = make_glossary_term(self.env, textnodes, parts[1], source, lineno,
                                          document=self.state.document)
                term.rawsource = line
                system_messages.extend(sysmsg)
                termtexts.append(term.astext())
                termnodes.append(term)
            termnodes.extend(system_messages)
            defnode = nodes.definition()
            if definition:
                self.state.nested_parse(definition, definition.items[0][1],
                                        defnode)
            termnodes.append(defnode)
            items.append((termtexts,
                          nodes.definition_list_item('', *termnodes)))
        if 'sorted' in self.options:
            # sort case-insensitively on the first term, accent-folded (NFD)
            items.sort(key=lambda x:
                       unicodedata.normalize('NFD', x[0][0].lower()))
        dlist = nodes.definition_list()
        dlist['classes'].append('glossary')
        dlist.extend(item[1] for item in items)
        node += dlist
        return messages + [node]
def token_xrefs(text: str, productionGroup: str = '') -> List[Node]:
    """Turn ```token``` markers inside *text* into pending cross-references.

    Plain text between markers is emitted as Text nodes; each marker becomes
    a ``pending_xref`` to the token, qualified by *productionGroup* when one
    is given.
    """
    prefix = productionGroup + ':' if len(productionGroup) != 0 else productionGroup
    result = []  # type: List[Node]
    cursor = 0
    for match in token_re.finditer(text):
        # emit any literal text preceding this token marker
        if cursor < match.start():
            literal = text[cursor:match.start()]
            result.append(nodes.Text(literal, literal))
        token = match.group(1)
        xref = pending_xref(token, reftype='token', refdomain='std',
                            reftarget=prefix + token)
        xref += nodes.literal(token, token, classes=['xref'])
        result.append(xref)
        cursor = match.end()
    # trailing literal text after the final marker
    if cursor < len(text):
        result.append(nodes.Text(text[cursor:], text[cursor:]))
    return result
class ProductionList(SphinxDirective):
    """
    Directive to list grammar productions.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {} # type: Dict
    def run(self) -> List[Node]:
        """Parse the argument's ``name: tokens`` lines into production nodes."""
        domain = cast(StandardDomain, self.env.get_domain('std'))
        node = addnodes.productionlist()  # type: Element
        self.set_source_info(node)
        # The backslash handling is from ObjectDescription.get_signatures
        nl_escape_re = re.compile(r'\\\n')
        lines = nl_escape_re.sub('', self.arguments[0]).split('\n')
        productionGroup = ""
        i = 0
        for rule in lines:
            # a leading line with no colon names the production group
            if i == 0 and ':' not in rule:
                productionGroup = rule.strip()
                continue
            i += 1
            try:
                name, tokens = rule.split(':', 1)
            except ValueError:
                break
            subnode = addnodes.production(rule)
            subnode['tokenname'] = name.strip()
            if subnode['tokenname']:
                prefix = 'grammar-token-%s' % productionGroup
                node_id = make_id(self.env, self.state.document, prefix, name)
                subnode['ids'].append(node_id)
                # Assign old styled node_id not to break old hyperlinks (if possible)
                # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
                old_node_id = self.make_old_id(name)
                if (old_node_id not in self.state.document.ids and
                        old_node_id not in subnode['ids']):
                    subnode['ids'].append(old_node_id)
                self.state.document.note_implicit_target(subnode, subnode)
                if len(productionGroup) != 0:
                    objName = "%s:%s" % (productionGroup, name)
                else:
                    objName = name
                domain.note_object('token', objName, node_id, location=node)
            # cross-link `token` markers inside the rule body
            subnode.extend(token_xrefs(tokens, productionGroup))
            node.append(subnode)
        return [node]
    def make_old_id(self, token: str) -> str:
        """Generate old styled node_id for tokens.
        .. note:: Old Styled node_id was used until Sphinx-3.0.
                  This will be removed in Sphinx-5.0.
        """
        return nodes.make_id('grammar-token-' + token)
class TokenXRefRole(XRefRole):
    """Cross-reference role for grammar tokens (``:token:``)."""
    def process_link(self, env: "BuildEnvironment", refnode: Element, has_explicit_title: bool,
                     title: str, target: str) -> Tuple[str, str]:
        target = target.lstrip('~')  # a title-specific thing
        # a leading "~" asks to display only the token part of "group:token"
        if not self.has_explicit_title and title[0] == '~':
            if ':' in title:
                _, title = title.split(':')
            else:
                title = title[1:]
        return title, target
class StandardDomain(Domain):
    """
    Domain for all objects that don't fit into another domain or are added
    via the application interface.
    """
    name = 'std'
    label = 'Default'
    # object type name -> metadata (localized label, usable roles, searchprio)
    object_types = {
        'term': ObjType(_('glossary term'), 'term', searchprio=-1),
        'token': ObjType(_('grammar token'), 'token', searchprio=-1),
        'label': ObjType(_('reference label'), 'ref', 'keyword',
                         searchprio=-1),
        'envvar': ObjType(_('environment variable'), 'envvar'),
        'cmdoption': ObjType(_('program option'), 'option'),
        'doc': ObjType(_('document'), 'doc', searchprio=-1)
    }  # type: Dict[str, ObjType]
    directives = {
        'program': Program,
        'cmdoption': Cmdoption,  # old name for backwards compatibility
        'option': Cmdoption,
        'envvar': EnvVar,
        'glossary': Glossary,
        'productionlist': ProductionList,
    }  # type: Dict[str, Type[Directive]]
    roles = {
        'option': OptionXRefRole(warn_dangling=True),
        'envvar': EnvVarXRefRole(),
        # links to tokens in grammar productions
        'token': TokenXRefRole(),
        # links to terms in glossary
        'term': XRefRole(innernodeclass=nodes.inline,
                         warn_dangling=True),
        # links to headings or arbitrary labels
        'ref': XRefRole(lowercase=True, innernodeclass=nodes.inline,
                        warn_dangling=True),
        # links to labels of numbered figures, tables and code-blocks
        'numref': XRefRole(lowercase=True,
                           warn_dangling=True),
        # links to labels, without a different title
        'keyword': XRefRole(warn_dangling=True),
        # links to documents
        'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
    }  # type: Dict[str, Union[RoleFunction, XRefRole]]
    # built-in labels that are always resolvable via :ref:
    initial_data = {
        'progoptions': {},  # (program, name) -> docname, labelid
        'objects': {},      # (type, name) -> docname, labelid
        'labels': {         # labelname -> docname, labelid, sectionname
            'genindex': ('genindex', '', _('Index')),
            'modindex': ('py-modindex', '', _('Module Index')),
            'search': ('search', '', _('Search Page')),
        },
        'anonlabels': {     # labelname -> docname, labelid
            'genindex': ('genindex', ''),
            'modindex': ('py-modindex', ''),
            'search': ('search', ''),
        },
    }
    # warning message templates for unresolvable references, keyed by role
    dangling_warnings = {
        'term': 'term not in glossary: %(target)s',
        'ref': 'undefined label: %(target)s (if the link has no caption '
               'the label must precede a section header)',
        'numref': 'undefined label: %(target)s',
        'keyword': 'unknown keyword: %(target)s',
        'doc': 'unknown document: %(target)s',
        'option': 'unknown option: %(target)s',
    }
    enumerable_nodes = {  # node_class -> (figtype, title_getter)
        nodes.figure: ('figure', None),
        nodes.table: ('table', None),
        nodes.container: ('code-block', None),
    }  # type: Dict[Type[Node], Tuple[str, Callable]]
def __init__(self, env: "BuildEnvironment") -> None:
super().__init__(env)
# set up enumerable nodes
self.enumerable_nodes = copy(self.enumerable_nodes) # create a copy for this instance
for node, settings in env.app.registry.enumerable_nodes.items():
self.enumerable_nodes[node] = settings
    def note_hyperlink_target(self, name: str, docname: str, node_id: str,
                              title: str = '') -> None:
        """Add a hyperlink target for cross reference.
        .. warning::
           This is only for internal use.  Please don't use this from your extension.
           ``document.note_explicit_target()`` or ``note_implicit_target()`` are recommended to
           add a hyperlink target to the document.
           This only adds a hyperlink target to the StandardDomain.  And this does not add a
           node_id to node.  Therefore, it is very fragile to calling this without
           understanding hyperlink target framework in both docutils and Sphinx.
        .. versionadded:: 3.0
        """
        if name in self.anonlabels and self.anonlabels[name] != (docname, node_id):
            # a later registration silently wins; warn about the collision
            logger.warning(__('duplicate label %s, other instance in %s'),
                           name, self.env.doc2path(self.anonlabels[name][0]))
        self.anonlabels[name] = (docname, node_id)
        if title:
            # only titled targets also become named labels usable by :ref:
            self.labels[name] = (docname, node_id, title)
    @property
    def objects(self) -> Dict[Tuple[str, str], Tuple[str, str]]:
        # Lazily-created storage shared via self.data (persisted with the env).
        return self.data.setdefault('objects', {})  # (objtype, name) -> docname, labelid
    def note_object(self, objtype: str, name: str, labelid: str, location: Any = None
                    ) -> None:
        """Note a generic object for cross reference.
        .. versionadded:: 3.0
        """
        if (objtype, name) in self.objects:
            docname = self.objects[objtype, name][0]
            # the new registration overwrites the old one; warn about it
            logger.warning(__('duplicate %s description of %s, other instance in %s'),
                           objtype, name, docname, location=location)
        self.objects[objtype, name] = (self.env.docname, labelid)
    def add_object(self, objtype: str, name: str, docname: str, labelid: str) -> None:
        # Deprecated public mutator kept only for backwards compatibility.
        warnings.warn('StandardDomain.add_object() is deprecated.',
                      RemovedInSphinx50Warning, stacklevel=2)
        self.objects[objtype, name] = (docname, labelid)
    @property
    def progoptions(self) -> Dict[Tuple[str, str], Tuple[str, str]]:
        return self.data.setdefault('progoptions', {})  # (program, name) -> docname, labelid
    @property
    def labels(self) -> Dict[str, Tuple[str, str, str]]:
        return self.data.setdefault('labels', {})  # labelname -> docname, labelid, sectionname
    @property
    def anonlabels(self) -> Dict[str, Tuple[str, str]]:
        return self.data.setdefault('anonlabels', {})  # labelname -> docname, labelid
def clear_doc(self, docname: str) -> None:
key = None # type: Any
for key, (fn, _l) in list(self.progoptions.items()):
if fn == docname:
del self.progoptions[key]
for key, (fn, _l) in list(self.objects.items()):
if fn == docname:
del self.objects[key]
for key, (fn, _l, _l) in list(self.labels.items()):
if fn == docname:
del self.labels[key]
for key, (fn, _l) in list(self.anonlabels.items()):
if fn == docname:
del self.anonlabels[key]
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
self.progoptions[key] = data
for key, data in otherdata['objects'].items():
if data[0] in docnames:
self.objects[key] = data
for key, data in otherdata['labels'].items():
if data[0] in docnames:
self.labels[key] = data
for key, data in otherdata['anonlabels'].items():
if data[0] in docnames:
self.anonlabels[key] = data
    def process_doc(self, env: "BuildEnvironment", docname: str, document: nodes.document) -> None:  # NOQA
        # Collect explicitly-named targets in *document* as labels usable by
        # :ref:.  Sections/rubrics and enumerable nodes also get a section
        # name for captioned references; everything else stays anonymous.
        for name, explicit in document.nametypes.items():
            if not explicit:
                continue
            labelid = document.nameids[name]
            if labelid is None:
                continue
            node = document.ids[labelid]
            if isinstance(node, nodes.target) and 'refid' in node:
                # indirect hyperlink targets
                node = document.ids.get(node['refid'])
                labelid = node['names'][0]
            if (node.tagname == 'footnote' or
                    'refuri' in node or
                    node.tagname.startswith('desc_')):
                # ignore footnote labels, labels automatically generated from a
                # link and object descriptions
                continue
            if name in self.labels:
                logger.warning(__('duplicate label %s, other instance in %s'),
                               name, env.doc2path(self.labels[name][0]),
                               location=node)
            self.anonlabels[name] = docname, labelid
            if node.tagname in ('section', 'rubric'):
                title = cast(nodes.title, node[0])
                sectname = clean_astext(title)
            elif self.is_enumerable_node(node):
                sectname = self.get_numfig_title(node)
                if not sectname:
                    continue
            else:
                # a toctree caption can also serve as the section name
                toctree = next(iter(node.traverse(addnodes.toctree)), None)
                if toctree and toctree.get('caption'):
                    sectname = toctree.get('caption')
                else:
                    # anonymous-only labels
                    continue
            self.labels[name] = docname, labelid, sectname
    def add_program_option(self, program: str, name: str, docname: str, labelid: str) -> None:
        # Record where option *name* of *program* is documented so that
        # :option: references can be resolved later.
        self.progoptions[program, name] = (docname, labelid)
    def build_reference_node(self, fromdocname: str, builder: "Builder", docname: str,
                             labelid: str, sectname: str, rolename: str, **options: Any
                             ) -> Element:
        """Create a reference node pointing at *labelid* in *docname*.

        *sectname* becomes the visible text; *rolename* is appended to the
        inner node's CSS classes.  ``nodeclass`` may be passed via *options*
        to build something other than ``nodes.reference``.
        """
        nodeclass = options.pop('nodeclass', nodes.reference)
        newnode = nodeclass('', '', internal=True, **options)
        innernode = nodes.inline(sectname, sectname)
        if innernode.get('classes') is not None:
            innernode['classes'].append('std')
            innernode['classes'].append('std-' + rolename)
        if docname == fromdocname:
            # same document: a bare fragment reference suffices
            newnode['refid'] = labelid
        else:
            # set more info in contnode; in case the
            # get_relative_uri call raises NoUri,
            # the builder will then have to resolve these
            contnode = pending_xref('')
            contnode['refdocname'] = docname
            contnode['refsectname'] = sectname
            newnode['refuri'] = builder.get_relative_uri(
                fromdocname, docname)
            if labelid:
                newnode['refuri'] += '#' + labelid
        newnode.append(innernode)
        return newnode
def resolve_xref(self, env: "BuildEnvironment", fromdocname: str, builder: "Builder",
typ: str, target: str, node: pending_xref, contnode: Element) -> Element:
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
resolver = self._resolve_numref_xref
elif typ == 'keyword':
resolver = self._resolve_keyword_xref
elif typ == 'doc':
resolver = self._resolve_doc_xref
elif typ == 'option':
resolver = self._resolve_option_xref
elif typ == 'citation':
warnings.warn('pending_xref(domain=std, type=citation) is deprecated: %r' % node,
RemovedInSphinx40Warning, stacklevel=2)
domain = env.get_domain('citation')
return domain.resolve_xref(env, fromdocname, builder, typ, target, node, contnode)
elif typ == 'term':
resolver = self._resolve_term_xref
else:
resolver = self._resolve_obj_xref
return resolver(env, fromdocname, builder, typ, target, node, contnode)
    def _resolve_ref_xref(self, env: "BuildEnvironment", fromdocname: str,
                          builder: "Builder", typ: str, target: str, node: pending_xref,
                          contnode: Element) -> Element:
        # Resolve a :ref: reference; returns None when the label is unknown.
        if node['refexplicit']:
            # reference to anonymous label; the reference uses
            # the supplied link caption
            docname, labelid = self.anonlabels.get(target, ('', ''))
            sectname = node.astext()
        else:
            # reference to named label; the final node will
            # contain the section name after the label
            docname, labelid, sectname = self.labels.get(target, ('', '', ''))
        if not docname:
            return None
        return self.build_reference_node(fromdocname, builder,
                                         docname, labelid, sectname, 'ref')
    def _resolve_numref_xref(self, env: "BuildEnvironment", fromdocname: str,
                             builder: "Builder", typ: str, target: str,
                             node: pending_xref, contnode: Element) -> Element:
        # Resolve a :numref: to a numbered figure/table/code-block, rendering
        # the number (and optional caption) through numfig_format.
        if target in self.labels:
            docname, labelid, figname = self.labels.get(target, ('', '', ''))
        else:
            docname, labelid = self.anonlabels.get(target, ('', ''))
            figname = None
        if not docname:
            return None
        target_node = env.get_doctree(docname).ids.get(labelid)
        figtype = self.get_enumerable_node_type(target_node)
        if figtype is None:
            return None
        if figtype != 'section' and env.config.numfig is False:
            logger.warning(__('numfig is disabled. :numref: is ignored.'), location=node)
            return contnode
        try:
            fignumber = self.get_fignumber(env, builder, figtype, docname, target_node)
            if fignumber is None:
                return contnode
        except ValueError:
            logger.warning(__("no number is assigned for %s: %s"), figtype, labelid,
                           location=node)
            return contnode
        try:
            if node['refexplicit']:
                # explicit titles override the configured format string
                title = contnode.astext()
            else:
                title = env.config.numfig_format.get(figtype, '')
            if figname is None and '{name}' in title:
                logger.warning(__('the link has no caption: %s'), title, location=node)
                return contnode
            else:
                fignum = '.'.join(map(str, fignumber))
                if '{name}' in title or 'number' in title:
                    # new style format (cf. "Fig.{number}")
                    if figname:
                        newtitle = title.format(name=figname, number=fignum)
                    else:
                        newtitle = title.format(number=fignum)
                else:
                    # old style format (cf. "Fig.%s")
                    newtitle = title % fignum
        except KeyError as exc:
            logger.warning(__('invalid numfig_format: %s (%r)'), title, exc, location=node)
            return contnode
        except TypeError:
            logger.warning(__('invalid numfig_format: %s'), title, location=node)
            return contnode
        return self.build_reference_node(fromdocname, builder,
                                         docname, labelid, newtitle, 'numref',
                                         nodeclass=addnodes.number_reference,
                                         title=title)
    def _resolve_keyword_xref(self, env: "BuildEnvironment", fromdocname: str,
                              builder: "Builder", typ: str, target: str,
                              node: pending_xref, contnode: Element) -> Element:
        # Resolve a :keyword: reference; returns None if no such label.
        # keywords are oddballs: they are referenced by named labels
        docname, labelid, _ = self.labels.get(target, ('', '', ''))
        if not docname:
            return None
        return make_refnode(builder, fromdocname, docname,
                            labelid, contnode)
    def _resolve_doc_xref(self, env: "BuildEnvironment", fromdocname: str,
                          builder: "Builder", typ: str, target: str,
                          node: pending_xref, contnode: Element) -> Element:
        # directly reference to document by source name; can be absolute or relative
        refdoc = node.get('refdoc', fromdocname)
        docname = docname_join(refdoc, node['reftarget'])
        if docname not in env.all_docs:
            return None
        else:
            if node['refexplicit']:
                # reference with explicit title
                caption = node.astext()
            else:
                # default to the target document's own title
                caption = clean_astext(env.titles[docname])
            innernode = nodes.inline(caption, caption, classes=['doc'])
            return make_refnode(builder, fromdocname, docname, None, innernode)
    def _resolve_option_xref(self, env: "BuildEnvironment", fromdocname: str,
                             builder: "Builder", typ: str, target: str,
                             node: pending_xref, contnode: Element) -> Element:
        # Resolve an :option: reference using the program recorded at the
        # reference site (see OptionXRefRole.process_link).
        progname = node.get('std:program')
        target = target.strip()
        docname, labelid = self.progoptions.get((progname, target), ('', ''))
        if not docname:
            # "prog subcmd --opt" style: peel off leading words as subcommands
            # and retry with "prog-subcmd" as the program name.
            commands = []
            while ws_re.search(target):
                subcommand, target = ws_re.split(target, 1)
                commands.append(subcommand)
                progname = "-".join(commands)
                docname, labelid = self.progoptions.get((progname, target), ('', ''))
                if docname:
                    break
            else:
                return None
        return make_refnode(builder, fromdocname, docname,
                            labelid, contnode)
def _resolve_term_xref(self, env: "BuildEnvironment", fromdocname: str,
builder: "Builder", typ: str, target: str,
node: pending_xref, contnode: Element) -> Element:
result = self._resolve_obj_xref(env, fromdocname, builder, typ,
target, node, contnode)
if result:
return result
else:
for objtype, term in self.objects:
if objtype == 'term' and term.lower() == target.lower():
docname, labelid = self.objects[objtype, term]
logger.warning(__('term %s not found in case sensitive match.'
'made a reference to %s instead.'),
target, term, location=node, type='ref', subtype='term')
break
else:
docname, labelid = '', ''
if not docname:
return None
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
def _resolve_obj_xref(self, env: "BuildEnvironment", fromdocname: str,
builder: "Builder", typ: str, target: str,
node: pending_xref, contnode: Element) -> Element:
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.objects:
docname, labelid = self.objects[objtype, target]
break
else:
docname, labelid = '', ''
if not docname:
return None
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
    def resolve_any_xref(self, env: "BuildEnvironment", fromdocname: str,
                         builder: "Builder", target: str, node: pending_xref,
                         contnode: Element) -> List[Tuple[str, Element]]:
        """Resolve an ``:any:`` cross-reference against every std construct.

        Returns a list of ``('std:<role>', refnode)`` candidates; the caller
        is responsible for picking one (and warning on ambiguity).
        """
        results = []  # type: List[Tuple[str, Element]]
        ltarget = target.lower()  # :ref: lowercases its target automatically
        for role in ('ref', 'option'):  # do not try "keyword"
            res = self.resolve_xref(env, fromdocname, builder, role,
                                    ltarget if role == 'ref' else target,
                                    node, contnode)
            if res:
                results.append(('std:' + role, res))
        # all others
        for objtype in self.object_types:
            key = (objtype, target)
            if objtype == 'term':
                # glossary terms are matched case-insensitively
                key = (objtype, ltarget)
            if key in self.objects:
                docname, labelid = self.objects[key]
                results.append(('std:' + self.role_for_objtype(objtype),
                                make_refnode(builder, fromdocname, docname,
                                             labelid, contnode)))
        return results
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
for (prog, option), info in self.progoptions.items():
if prog:
fullname = ".".join([prog, option])
yield (fullname, fullname, 'cmdoption', info[0], info[1], 1)
else:
yield (option, option, 'cmdoption', info[0], info[1], 1)
for (type, name), info in self.objects.items():
yield (name, name, type, info[0], info[1],
self.object_types[type].attrs['searchprio'])
for name, (docname, labelid, sectionname) in self.labels.items():
yield (name, sectionname, 'label', docname, labelid, -1)
# add anonymous-only labels as well
non_anon_labels = set(self.labels)
for name, (docname, labelid) in self.anonlabels.items():
if name not in non_anon_labels:
yield (name, name, 'label', docname, labelid, -1)
    def get_type_name(self, type: ObjType, primary: bool = False) -> str:
        """Return the localized name of *type*.

        The standard domain intentionally ignores *primary*: object type
        names are never prefixed with "Default".
        """
        # never prepend "Default"
        return type.lname
    def is_enumerable_node(self, node: Node) -> bool:
        """Return True if *node*'s class is registered as enumerable
        (i.e. it can receive a figure-style number)."""
        return node.__class__ in self.enumerable_nodes
    def get_numfig_title(self, node: Node) -> str:
        """Get the title of enumerable nodes to refer them using its title.

        Returns None when *node* is not enumerable or no title is found.
        """
        if self.is_enumerable_node(node):
            elem = cast(Element, node)
            # A node class may register a dedicated title getter; otherwise
            # fall back to the first caption/title child.
            _, title_getter = self.enumerable_nodes.get(elem.__class__, (None, None))
            if title_getter:
                return title_getter(elem)
            else:
                for subnode in elem:
                    if isinstance(subnode, (nodes.caption, nodes.title)):
                        return clean_astext(subnode)
        return None
    def get_enumerable_node_type(self, node: Node) -> str:
        """Get type of enumerable nodes (e.g. 'section', 'code-block',
        'figure'); None when *node* is not enumerable."""
        def has_child(node: Element, cls: "Type") -> bool:
            # True if any direct child of *node* is an instance of *cls*.
            return any(isinstance(child, cls) for child in node)
        if isinstance(node, nodes.section):
            return 'section'
        elif (isinstance(node, nodes.container) and
              'literal_block' in node and
              has_child(node, nodes.literal_block)):
            # given node is a code-block having caption
            # NOTE(review): ``'literal_block' in node`` relies on docutils
            # Element ``__contains__`` semantics -- confirm it tests what is
            # intended here rather than duplicating the has_child() check.
            return 'code-block'
        else:
            # Fall back to the registered enumerable node table.
            figtype, _ = self.enumerable_nodes.get(node.__class__, (None, None))
            return figtype
    def get_fignumber(self, env: "BuildEnvironment", builder: "Builder",
                      figtype: str, docname: str, target_node: Element) -> Tuple[int, ...]:
        """Return the assigned figure number tuple for *target_node*.

        Raises ValueError when no number has been assigned (e.g. the node
        lives in an orphaned document).
        """
        if figtype == 'section':
            if builder.name == 'latex':
                # LaTeX numbers sections natively; return an empty tuple.
                return tuple()
            elif docname not in env.toc_secnumbers:
                raise ValueError  # no number assigned
            else:
                anchorname = '#' + target_node['ids'][0]
                if anchorname not in env.toc_secnumbers[docname]:
                    # try first heading which has no anchor
                    return env.toc_secnumbers[docname].get('')
                else:
                    return env.toc_secnumbers[docname].get(anchorname)
        else:
            try:
                figure_id = target_node['ids'][0]
                return env.toc_fignumbers[docname][figtype][figure_id]
            except (KeyError, IndexError):
                # target_node is found, but fignumber is not assigned.
                # Maybe it is defined in orphaned document.
                raise ValueError
def get_full_qualified_name(self, node: Element) -> str:
if node.get('reftype') == 'option':
progname = node.get('std:program')
command = ws_re.split(node.get('reftarget'))
if progname:
command.insert(0, progname)
option = command.pop()
if command:
return '.'.join(['-'.join(command), option])
else:
return None
else:
return None
    def note_citations(self, env: "BuildEnvironment", docname: str, document: nodes.document) -> None: # NOQA
        # Deprecated no-op kept for backward compatibility; citation
        # handling moved out of the standard domain.
        warnings.warn('StandardDomain.note_citations() is deprecated.',
                      RemovedInSphinx40Warning, stacklevel=2)
    def note_citation_refs(self, env: "BuildEnvironment", docname: str, document: nodes.document) -> None: # NOQA
        # Deprecated no-op kept for backward compatibility; citation
        # reference handling moved out of the standard domain.
        warnings.warn('StandardDomain.note_citation_refs() is deprecated.',
                      RemovedInSphinx40Warning, stacklevel=2)
    def note_labels(self, env: "BuildEnvironment", docname: str, document: nodes.document) -> None: # NOQA
        # Deprecated no-op kept for backward compatibility; label collection
        # is handled elsewhere now.
        warnings.warn('StandardDomain.note_labels() is deprecated.',
                      RemovedInSphinx40Warning, stacklevel=2)
def setup(app: "Sphinx") -> Dict[str, Any]:
    """Register the standard domain with the Sphinx application and report
    the extension's capabilities."""
    app.add_domain(StandardDomain)
    metadata = {
        'version': 'builtin',
        'env_version': 1,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
    return metadata
| 41.555656 | 114 | 0.564385 |
77eb230cba94a665d1c411c10f0b81b9edece1be
| 5,093 |
py
|
Python
|
conary_test/optionstest.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 43 |
2015-03-31T01:37:10.000Z
|
2021-11-14T16:26:48.000Z
|
conary_test/optionstest.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 9 |
2015-06-10T16:39:41.000Z
|
2020-01-27T16:35:01.000Z
|
conary_test/optionstest.py
|
sassoftware/conary
|
d418968acd5e11ee17ed6d91ca395ea10a040222
|
[
"Apache-2.0"
] | 9 |
2015-04-07T08:12:37.000Z
|
2020-01-26T09:54:18.000Z
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from conary_test import rephelp
from conary import conarycfg
from conary.lib import options
class ConaryCfgTest(rephelp.RepositoryHelper):
    """Exercise conary.lib.options command line parsing (legacy Python 2)."""
    # Shared fixtures: a config object that skips on-disk files, the option
    # table (argDef: flag name -> arity constant) and the mapping of command
    # line flags onto configuration items (cfgMap).
    cfg = conarycfg.ConaryConfiguration(readConfigFiles=False)
    argDef = {}
    cfgMap = {}
    cfgMap["cfgmap"] = "root"
    (NO_PARAM, ONE_PARAM) = (options.NO_PARAM, options.ONE_PARAM)
    (OPT_PARAM, MULT_PARAM) = (options.OPT_PARAM, options.MULT_PARAM)
    STRICT_OPT_PARAM = options.STRICT_OPT_PARAM
    argDef['no'] = NO_PARAM
    argDef['one'] = ONE_PARAM
    argDef['opt'] = OPT_PARAM
    argDef['mult'] = MULT_PARAM
    argDef['strict'] = STRICT_OPT_PARAM
    # NOTE(review): deliberately lacks *self* -- when accessed as self.usage
    # the bound instance is passed as *rc*.  processArgs only needs a
    # callable here, so the return value is unused in these tests.
    def usage(rc=1):
        return rc
    def testOptions(self):
        """All four parameter arities parse; non-option args pass through."""
        argv = ['conary', '--no', 'other1', '--one=onev', 'other2', '--opt=opt', '--mult', 'multv1', 'other3', '--mult', '--multv2', 'other4']
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary', 'other1', 'other2', 'other3', 'other4'])
        assert(argSet['no'] is True)
        assert(argSet['one'] == 'onev')
        assert(argSet['opt'] == 'opt')
        assert(argSet['mult'] == ['multv1', '--multv2'])
    def testBadParams(self):
        """Unknown flags and arity violations raise OptionError."""
        argv = ['conary', '--unknown']
        try:
            options.processArgs(self.argDef, self.cfgMap, self.cfg,
                                self.usage, argv=argv)
            # reaching here means no OptionError was raised -- fail loudly
            raise RuntimeError
        except options.OptionError, msg:
            assert(msg[0] == 'no such option: --unknown')
        argv = ['conary', '--one']
        try:
            options.processArgs(self.argDef, self.cfgMap, self.cfg,
                                self.usage, argv=argv)
            raise RuntimeError
        except options.OptionError, msg:
            assert(msg[0] == '--one option requires an argument')
        argv = ['conary', '--no=optone']
        try:
            options.processArgs(self.argDef, self.cfgMap, self.cfg,
                                self.usage, argv=argv)
            raise RuntimeError
        except options.OptionError, msg:
            assert(msg[0] == '--no option does not take a value')
    def testOptionalParam(self):
        """OPT_PARAM consumes a following value when present, else True."""
        argv = ['conary', '--opt', '--', '--one=onev', 'other2' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary', '--one=onev', 'other2'])
        argv = ['conary', '--opt=', 'one' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary', 'one'])
        assert(argSet['opt'] is True)
        # test an optional param argument when it is the last argument on the
        # command line and no param is given
        argv = ['conary', '--opt' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(argSet['opt'] is True)
        argv = ['conary', '--opt', 'foo' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(argSet['opt'] == 'foo')
    def testStrictOptionalParam(self):
        """STRICT_OPT_PARAM never consumes a following bare argument."""
        argv = ['conary', '--strict', '--', '--one=onev', 'other2' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary', '--one=onev', 'other2'])
        argv = ['conary', '--strict=', 'one' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary', 'one'])
        assert(argSet['strict'] is True)
        # test an optional param argument when it is the last argument on the
        # command line and no param is given
        argv = ['conary', '--strict' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(argSet['strict'] is True)
        argv = ['conary', '--strict', 'foo' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(argSet['strict'] is True)
    def testCfgMap(self):
        """A cfgMap flag writes straight into the configuration object."""
        argv = ['conary', '--cfgmap=rootval' ]
        argSet, otherArgs = options.processArgs(self.argDef, self.cfgMap, self.cfg, self.usage, argv=argv)
        assert(otherArgs==['conary'])
        # "root" is a path item, so the value is anchored at the CWD
        assert(self.cfg.root == os.getcwd() + '/rootval')
| 42.798319 | 142 | 0.609464 |
eaee2287027fce5d45ecad25e510bc7a1c234461
| 34 |
py
|
Python
|
ddlogger/__init__.py
|
kkew3/dot-dot-logger
|
31620e2137c5a2dbdcf7bb46dd91555c51b64c81
|
[
"MIT"
] | null | null | null |
ddlogger/__init__.py
|
kkew3/dot-dot-logger
|
31620e2137c5a2dbdcf7bb46dd91555c51b64c81
|
[
"MIT"
] | null | null | null |
ddlogger/__init__.py
|
kkew3/dot-dot-logger
|
31620e2137c5a2dbdcf7bb46dd91555c51b64c81
|
[
"MIT"
] | null | null | null |
from ddlogger import DotDotLogger
| 17 | 33 | 0.882353 |
8263a462d8fa970f4276b8a7e67b4f39cd4f4dcc
| 3,874 |
py
|
Python
|
AI_run.py
|
ManuelSerranoR/Deep-Driving
|
c14bc9596704cd33529b8cb30b345eda4ee30432
|
[
"MIT"
] | 1 |
2022-02-22T08:03:12.000Z
|
2022-02-22T08:03:12.000Z
|
AI_run.py
|
ManuelSerranoR/Deep-Driving
|
c14bc9596704cd33529b8cb30b345eda4ee30432
|
[
"MIT"
] | null | null | null |
AI_run.py
|
ManuelSerranoR/Deep-Driving
|
c14bc9596704cd33529b8cb30b345eda4ee30432
|
[
"MIT"
] | null | null | null |
import socket
import sys
import getopt
import math
import os
import cv2
import time
import mss
import mss.tools
import random
import pandas as pd
import numpy as np
from PIL import Image
import pickle
import pyautogui
from skimage import transform #For downsizing images
import tensorflow as tf
sys.path.append('/home/canlab/Desktop/gym_torcs')
from snakeoil3_gym_old import *
# State-feedback gain vectors, precomputed offline (presumably via LQR or
# pole placement for different operating points -- TODO confirm provenance).
# drive() multiplies one of these with the state vector
# [trackPos, d(trackPos)/dt, angle, d(angle)/dt]; only K12 is used below.
K1 = [10.9908717467978, 0.195270206285503, 2.72666097213649, -0.0242112941279921]
K2 = [29.3088785103195, 0.554720938680623, 9.32215050455478, -0.0783918237579237]
K3 = [15.1634806408263, 0.274302220134651, 2.96487374439337, -0.0591630761603098]
K4 = [20.9021653759770, 0.389650930651144, 6.30719447180785, -0.0509215760266584]
K5 = [50.9714030885695, 0.976208001341686, 17.4099031409000, -0.174972239077307]
K6 = [20.1472107264722, 0.374579805036356, 5.99662162767896, -0.0618876680162685]
K7 = [50.8841442241103, 0.974420194231805, 17.2845730533334, -0.168859625168260]
K8 = [5.74658671795202, 0.0943574176842459, 0.715637550153061, -0.00431826756492982]
K9 = [80.4708063923528, 1.55708090168432, 27.4692802112351, -0.262353879617020]
K10 = [112.018020379178, 2.15364021346955, 37.8405518624785, -0.427361896649282]
K11 = [9.23367926763609, 1.95275841037166, 2.72676657363386, -0.242125733895282]
K12 = [12.6977207762501, 2.74374094090788, 2.96618342747267, -0.591707929386049]
K13 = [2.79995871454660, 0.437718377480784, -0.181819647702710, 0.000902734259494991]
K14 = [17.3952997585746, 3.89650745470490, 6.30719267080744, -0.509216683749110]
K15 = [42.1935979235132, 9.76400687139357, 17.4133997845239, -1.75004024168088]
K16 = [14.6525527110655, 3.24460508533655, 5.06178291138374, -0.531858811257178]
K17 = [49.9659836483932, 11.6156203022342, 20.8389206311191, -2.02144735902972]
K18 = [4.88926526279235, 0.941812964337513, 0.712483291441938, -0.0427015511972851]
K19 = [13.5931670099812, 2.89804410735146, 4.12451826819267, -0.495894907656895]
K20 = [10.0958121862069, 2.14046234200640, 2.91703565069511, -0.270506090822326]
def drive(c, time_diff, angle_diff, trackPos_diff):
    """Fill the client's actuator dict from its current sensor readings.

    Above 10 (speedX units) steering comes from the K12 state-feedback gains
    applied to [trackPos, d(trackPos)/dt, angle, d(angle)/dt]; below that a
    simple proportional law is used.  Throttle is fixed and the brake is
    applied only past speed 53.
    """
    S, R = c.S.d, c.R.d
    print(S['speedX'])
    if S['speedX'] > 10:
        state = [S['trackPos'], trackPos_diff / time_diff,
                 S['angle'], angle_diff / time_diff]
        R['steer'] = -np.dot(K12, state)
    else:
        # Low-speed fallback: proportional on heading, corrected by offset.
        R['steer'] = 0.7 * S['angle'] - S['trackPos']
    R['accel'] = .15
    R['brake'] = .1 if S['speedX'] > 53 else .0
# ================ MAIN ================
if __name__ == "__main__":
    # Connect to the TORCS server and drive for maxSteps sensor cycles,
    # maintaining finite differences of angle / track position so drive()
    # can approximate their time derivatives.
    C = Client(p = 3101)
    time_new = 0.2
    time_old = 0
    angle_new = 0
    angle_old = 0
    trackPos_new = 0
    trackPos_old = 0
    for step in range(C.maxSteps):
        print('Step',step,'out of',C.maxSteps-1)
        if step == 0:
            print("--------------")
            print("change view")
            print("--------------")
            # Switch the TORCS camera once at startup via a keystroke.
            pyautogui.press("f2")
        #This causes the delay of 0.2 seconds
        C.get_servers_input()
        time_old = time_new
        time_new = time.time()
        time_diff = time_new - time_old
        print('TIME:', time_diff)
        trackPos_old = trackPos_new
        trackPos_new = C.S.d['trackPos']
        trackPos_diff = trackPos_new - trackPos_old
        print('DIST:', trackPos_diff)
        angle_old = angle_new
        angle_new = C.S.d['angle']
        angle_diff = angle_new - angle_old
        print('ANGLE:', angle_diff)
        drive(C, time_diff, angle_diff, trackPos_diff)
        C.respond_to_server()
    print("--------------")
    print("Close window")
    print("--------------")
    # Close the game window by clicking its close button.
    pyautogui.click(80, 40, button='left')
    sys.exit()
    # NOTE(review): unreachable -- sys.exit() raises SystemExit above, so
    # the client is never shut down cleanly.
    C.shutdown()
| 33.982456 | 91 | 0.655653 |
2ba43113c42a149f08a26bf3d80a20f5b56d6f53
| 28,800 |
py
|
Python
|
test/python/test_attribute/test_attributes.py
|
ritvik06/Higra
|
5ab1601224cd5a3af5143ce380125fdfff1bde66
|
[
"CECILL-B"
] | 64 |
2019-08-18T19:23:23.000Z
|
2022-03-21T04:15:04.000Z
|
test/python/test_attribute/test_attributes.py
|
ritvik06/Higra
|
5ab1601224cd5a3af5143ce380125fdfff1bde66
|
[
"CECILL-B"
] | 120 |
2019-08-16T09:10:35.000Z
|
2022-03-17T09:42:58.000Z
|
test/python/test_attribute/test_attributes.py
|
ritvik06/Higra
|
5ab1601224cd5a3af5143ce380125fdfff1bde66
|
[
"CECILL-B"
] | 12 |
2019-10-04T07:35:55.000Z
|
2021-01-10T19:59:11.000Z
|
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : Benjamin Perret #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import unittest
import numpy as np
import higra as hg
class TestAttributes(unittest.TestCase):
    @staticmethod
    def get_test_tree():
        """
        base graph is
        (0)-- 0 --(1)-- 2 --(2)
         |         |         |
         6         6         0
         |         |         |
        (3)-- 0 --(4)-- 4 --(5)
         |         |         |
         5         5         3
         |         |         |
        (6)-- 0 --(7)-- 1 --(8)
        Minima are
        A: (0,1)
        B: (3,4)
        C: (2,5)
        D: (6,7)
        BPT:
        4                 +-------16------+
                          |               |
        3         +-------15-----+        |
                  |              |        |
        2     +---14--+          |        |
              |       |          |        |
        1     |       |       +--13-+     |
              |       |       |     |     |
        0   +-9-+   +-10+   +-12+   |   +-11+
            +   +   +   +   +   +   +   +   +
            0   1   2   5   6   7   8   3   4
        :return:
        """
        g = hg.get_4_adjacency_graph((3, 3))
        # Edge weights are listed in the graph's edge enumeration order.
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 0, 1))
        return hg.bpt_canonical(g, edge_weights)
    def setUp(self):
        # Start each test from a clean higra attribute cache.
        hg.clear_all_attributes()
    def test_area(self):
        """Node areas with unit and with custom per-leaf vertex areas."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_area = [1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 4, 7, 9]
        area = hg.attribute_area(tree)
        self.assertTrue(np.allclose(ref_area, area))
        leaf_area = np.asarray([1, 2, 1, 1, 2, 1, 1, 1, 3])
        ref_area = [1, 2, 1, 1, 2, 1, 1, 1, 3, 3, 2, 3, 2, 5, 5, 10, 13]
        area = hg.attribute_area(tree, vertex_area=leaf_area)
        self.assertTrue(np.allclose(ref_area, area))
    def test_area_default_param(self):
        """Area works both with and without a leaf graph attached."""
        g = hg.get_4_adjacency_graph((2, 3))
        edge_weights = np.asarray((1, 4, 6, 5, 2, 7, 3))
        ref_area = (1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 6)
        tree, altitudes = hg.bpt_canonical(g, edge_weights)
        area = hg.attribute_area(tree)
        self.assertTrue(np.all(ref_area == area))
        # Rebuilt from parents only: no leaf graph, same default areas.
        tree2 = hg.Tree(tree.parents())
        area2 = hg.attribute_area(tree2)
        self.assertTrue(np.all(ref_area == area2))
    def test_attribute_volume(self):
        """Volume with default (unit) leaf areas."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_attribute = [0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 4, 8, 2, 8, 12, 27, 35.]
        attribute = hg.attribute_volume(tree, altitudes)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_attribute_volume_with_area(self):
        """Volume weighted by an explicitly supplied area attribute."""
        tree = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
        altitudes = np.asarray((0, 0, 0, 0, 0, 2, 1, 4.))
        area = np.asarray((2, 1, 1, 3, 2, 3, 6, 9))
        ref_attribute = [0, 0, 0, 0, 0, 6, 18, 24]
        attribute = hg.attribute_volume(tree, altitudes, area)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_lca_map(self):
        """Per-edge lowest common ancestor of the edge's two vertices."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_attribute = [9, 16, 14, 16, 10, 11, 16, 16, 16, 15, 12, 13]
        attribute = hg.attribute_lca_map(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_frontier_length(self):
        """Number of base edges on the frontier of each tree node."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_attribute = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 5]
        attribute = hg.attribute_frontier_length(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_frontier_length_rag(self):
        """Frontier length on a tree built over a region adjacency graph."""
        g = hg.get_4_adjacency_graph((3, 3))
        vertex_labels = np.asarray(((0, 1, 1),
                                    (0, 2, 2),
                                    (3, 2, 4)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        edge_weights = np.asarray((1, 5, 4, 3, 6, 2), dtype=np.float64)
        tree, altitudes = hg.bpt_canonical(rag, edge_weights)
        ref_attribute = np.asarray([0, 0, 0, 0, 0, 1, 2, 1, 4], dtype=np.float64)
        attribute = hg.attribute_frontier_length(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_frontier_strength(self):
        """Mean base-edge weight along each node's frontier."""
        tree, altitudes = TestAttributes.get_test_tree()
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 0, 1), dtype=np.float64)
        ref_attribute = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 26 / 5]
        attribute = hg.attribute_frontier_strength(tree, edge_weights)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_frontier_strength_rag(self):
        """Frontier strength uses base graph weights even on a RAG tree."""
        g = hg.get_4_adjacency_graph((3, 3))
        vertex_labels = np.asarray(((0, 1, 1),
                                    (0, 2, 2),
                                    (3, 2, 4)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        rag_edge_weights = np.asarray((1, 5, 4, 3, 6, 2))
        tree, altitudes = hg.bpt_canonical(rag, rag_edge_weights)
        # tree is [5 5 6 7 6 7 8 8 8]
        edge_weights = np.asarray((1, 6, 2, 6, 1, 1, 5, 4, 5, 3, 1, 1), dtype=np.float64)
        ref_attribute = np.asarray([0, 0, 0, 0, 0, 1, 2, 5, 9 / 4], dtype=np.float64)
        attribute = hg.attribute_frontier_strength(tree, edge_weights)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_contour_length_partition_tree(self):
        """Contour length of each region of a partition tree."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_attribute = np.asarray([4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 8, 10, 16, 12], dtype=np.float64)
        attribute = hg.attribute_contour_length(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_contour_length_partition_tree2(self):
        """Contour length when image-border edges are excluded."""
        tree, altitudes = TestAttributes.get_test_tree()
        # Setting this graph attribute switches off the implicit out-degree
        # contribution of border vertices.
        hg.set_attribute(hg.CptHierarchy.get_leaf_graph(tree), "no_border_vertex_out_degree", None)
        ref_attribute = np.asarray([2, 3, 2, 3, 4, 3, 2, 3, 2, 3, 3, 5, 3, 3, 4, 5, 0], dtype=np.float64)
        attribute = hg.attribute_contour_length(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_contour_length_component_tree(self):
        """Contour length on a component (max) tree built by hand."""
        g = hg.get_4_adjacency_graph((4, 4))
        # for reference, tree is a max tree on the following image
        # 0, 1, 4, 4,
        # 7, 5, 6, 8,
        # 2, 3, 4, 1,
        # 9, 8, 6, 7
        t = hg.Tree((28, 27, 24, 24,
                     20, 23, 22, 18,
                     26, 25, 24, 27,
                     16, 17, 21, 19,
                     17, 21, 22, 21, 23, 24, 23, 24, 25, 26, 27, 28, 28),
                    hg.TreeCategory.ComponentTree)
        res = hg.attribute_contour_length(t, leaf_graph=g)
        ref = np.asarray((4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 6, 4, 4, 4, 10, 6, 10, 22, 20, 18, 16, 16), dtype=np.float64)
        self.assertTrue(np.all(res == ref))
    def test_contour_length_tree_of_shapes(self):
        """Contour length on a tree of shapes of a small 2D image."""
        image = np.asarray(((0, 0, 0, 0),
                            (0, -2, 2, 0),
                            (0, -1, 1, 0),
                            (0, 0, 0, 0)))
        tree, altitudes = hg.component_tree_tree_of_shapes_image2d(image)
        res = hg.attribute_contour_length(tree)
        ref = np.asarray((4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 4, 4, 4,
                          4, 4, 6, 6, 16), dtype=np.float64)
        self.assertTrue(np.all(res == ref))
    def test_contour_length_rag_partition_tree(self):
        """Contour length on a partition tree over a RAG."""
        g = hg.get_4_adjacency_graph((3, 3))
        vertex_labels = np.asarray(((0, 1, 1),
                                    (0, 2, 2),
                                    (3, 2, 4)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        edge_weights = np.asarray((1, 5, 4, 3, 6, 2), dtype=np.float64)
        tree, altitudes = hg.bpt_canonical(rag, edge_weights)
        ref_attribute = np.asarray([3, 3, 6, 2, 2, 4, 4, 4, 0], dtype=np.float64)
        attribute = hg.attribute_contour_length(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_contour_strength_partition_tree(self):
        """Contour strength = summed contour weights / contour length."""
        tree, altitudes = TestAttributes.get_test_tree()
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 0, 1), dtype=np.float64)
        attribute = hg.attribute_contour_strength(tree, edge_weights)
        ref_perimeter = np.asarray([4, 4, 4, 4, 4, 4, 4, 4, 4, 6, 6, 6, 6, 8, 10, 16, 12], dtype=np.float64)
        ref_weights = np.asarray([6, 8, 2, 11, 15, 7, 5, 6, 4, 14, 9, 26, 11, 13, 19, 26, 0], dtype=np.float64)
        self.assertTrue(np.allclose(ref_weights / ref_perimeter, attribute))
    def test_contour_strength_partition_tree2(self):
        """Contour strength with image-border contributions disabled."""
        tree, altitudes = TestAttributes.get_test_tree()
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 0, 1), dtype=np.float64)
        hg.set_attribute(hg.CptHierarchy.get_leaf_graph(tree), "no_border_vertex_out_degree", None)
        attribute = hg.attribute_contour_strength(tree, edge_weights)
        ref_perimeter = np.asarray([2, 3, 2, 3, 4, 3, 2, 3, 2, 3, 3, 5, 3, 3, 4, 5, 1], dtype=np.float64)
        ref_weights = np.asarray([6, 8, 2, 11, 15, 7, 5, 6, 4, 14, 9, 26, 11, 13, 19, 26, 0], dtype=np.float64)
        self.assertTrue(np.allclose(ref_weights / ref_perimeter, attribute))
    def test_contour_strength_component_tree(self):
        """Contour strength on a hand-built component (max) tree."""
        g = hg.get_4_adjacency_graph((4, 4))
        edge_weights = np.arange(g.num_edges(), dtype=np.float64)
        # for reference, tree is a max tree on the following image
        # 0, 1, 4, 4,
        # 7, 5, 6, 8,
        # 2, 3, 4, 1,
        # 9, 8, 6, 7
        t = hg.Tree((28, 27, 24, 24,
                     20, 23, 22, 18,
                     26, 25, 24, 27,
                     16, 17, 21, 19,
                     17, 21, 22, 21, 23, 24, 23, 24, 25, 26, 27, 28, 28),
                    hg.TreeCategory.ComponentTree)
        # Manually attach the leaf graph since the tree was not built by hg.
        hg.CptHierarchy.link(t, g)
        res = hg.attribute_contour_strength(t, edge_weights)
        ref_perimeter = np.asarray((4, 4, 4, 4,
                                    4, 4, 4, 4,
                                    4, 4, 4, 4,
                                    4, 4, 4, 4,
                                    4, 6, 4, 4, 4, 10, 6, 10, 22, 20, 18, 16, 16), dtype=np.float64)
        ref_weights = np.asarray((1, 5, 11, 10,
                                  16, 29, 37, 30,
                                  37, 57, 65, 51,
                                  36, 60, 64, 43,
                                  36, 54, 30, 43, 16, 71, 45, 58, 123, 94, 57, 1, 0), dtype=np.float64)
        self.assertTrue(np.allclose(ref_weights / ref_perimeter, res))
    def test_contour_strength_rag_partition_tree(self):
        """Contour strength on a RAG tree uses the base graph's weights."""
        g = hg.get_4_adjacency_graph((3, 3))
        vertex_labels = np.asarray(((0, 1, 1),
                                    (0, 2, 2),
                                    (3, 2, 4)))
        base_edge_weights = np.arange(g.num_edges(), dtype=np.float64)
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        edge_weights = np.asarray((1, 5, 4, 3, 6, 2))
        tree, altitudes = hg.bpt_canonical(rag, edge_weights)
        attribute = hg.attribute_contour_strength(tree, base_edge_weights)
        ref_perimeter = np.asarray([3, 3, 6, 2, 2, 4, 4, 4, 1], dtype=np.float64)
        ref_weights = np.asarray([11, 7, 42, 16, 20, 18, 22, 22, 0], dtype=np.float64)
        self.assertTrue(np.allclose(ref_weights / ref_perimeter, attribute))
    def test_compactness(self):
        """Normalized compactness (1 for single pixels, root renormalized)."""
        tree, altitudes = TestAttributes.get_test_tree()
        ref_attribute = [1., 1., 1., 1., 1., 1., 1., 1., 1., 0.88888889, 0.88888889, 0.88888889, 0.88888889, 0.75, 0.64,
                         0.4375, 1.]
        attribute = hg.attribute_compactness(tree)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_mean_vertex_weights(self):
        """Mean of scalar leaf weights over each node's region."""
        tree, altitudes = TestAttributes.get_test_tree()
        leaf_data = np.asarray((0, 1, 2, 3, 4, 5, 6, 7, 8),
                               dtype=np.float64)
        ref_attribute = np.asarray((0, 1, 2, 3, 4, 5, 6, 7, 8,
                                    1. / 2, 7. / 2, 7. / 2, 13. / 2, 7.,
                                    2., 29. / 7, 4.))
        attribute = hg.attribute_mean_vertex_weights(tree, vertex_weights=leaf_data)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_mean_vertex_weights_vectorial(self):
        """Mean of 2D (vector) leaf weights over each node's region."""
        tree, altitudes = TestAttributes.get_test_tree()
        leaf_data = np.asarray(((0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8)),
                               dtype=np.float64)
        ref_attribute = np.asarray(((0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8),
                                    (1. / 2, 1. / 2), (7. / 2, 7. / 2), (7. / 2, 7. / 2), (13. / 2, 13. / 2), (7., 7.),
                                    (2., 2.), (29. / 7, 29. / 7), (4., 4.)))
        attribute = hg.attribute_mean_vertex_weights(tree, vertex_weights=leaf_data)
        self.assertTrue(np.allclose(ref_attribute, attribute))
    def test_sibling(self):
        """Sibling attribute with default and negative skip arguments."""
        t = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
        ref = np.asarray((1, 0, 3, 4, 2, 6, 5, 7))
        res = hg.attribute_sibling(t)
        self.assertTrue(np.all(ref == res))
        t = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
        ref2 = np.asarray((1, 0, 4, 2, 3, 6, 5, 7))
        res2 = hg.attribute_sibling(t, -1)
        self.assertTrue(np.all(ref2 == res2))
    def test_depth(self):
        """Node depth: 0 at the root, increasing towards the leaves."""
        t = hg.Tree((6, 6, 7, 8, 8, 8, 7, 9, 9, 9))
        ref = np.asarray((3, 3, 2, 2, 2, 2, 2, 1, 1, 0))
        res = hg.attribute_depth(t)
        self.assertTrue(np.all(ref == res))
    def test_regular_altitudes(self):
        """Altitudes proportional to depth, normalized into [0, 1]."""
        t = hg.Tree((6, 6, 7, 8, 8, 8, 7, 9, 9, 9))
        ref = np.asarray((0, 0, 0, 0, 0, 0, 1 / 3, 2 / 3, 2 / 3, 1))
        res = hg.attribute_regular_altitudes(t)
        self.assertTrue(np.allclose(ref, res))
    def test_vertex_coordinates(self):
        """(row, col) coordinates of each vertex of a grid graph."""
        g = hg.get_4_adjacency_graph((2, 3))
        ref = np.asarray((((0, 0), (0, 1), (0, 2)),
                          ((1, 0), (1, 1), (1, 2))))
        res = hg.attribute_vertex_coordinates(g)
        self.assertTrue(np.allclose(ref, res))
    def test_edge_length(self):
        """Unit edge lengths on a plain grid graph."""
        g = hg.get_4_adjacency_graph((2, 3))
        ref = np.asarray((1, 1, 1, 1, 1, 1, 1))
        res = hg.attribute_edge_length(g)
        self.assertTrue(np.allclose(ref, res))
    def test_edge_length_rag(self):
        """RAG edge length = number of base edges between the two regions."""
        g = hg.get_4_adjacency_graph((2, 3))
        vertex_labels = np.asarray(((1, 2, 2),
                                    (3, 3, 3)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        ref = np.asarray((1, 1, 2))
        res = hg.attribute_edge_length(rag)
        self.assertTrue(np.allclose(ref, res))
    def test_vertex_perimeter(self):
        """Default vertex perimeter counts implicit border edges too."""
        g = hg.get_4_adjacency_graph((2, 3))
        ref = np.asarray(((4, 4, 4),
                          (4, 4, 4)))
        res = hg.attribute_vertex_perimeter(g)
        self.assertTrue(np.allclose(ref, res))
    def test_vertex_perimeter2(self):
        """Vertex perimeter with border contributions disabled."""
        g = hg.get_4_adjacency_graph((2, 3))
        hg.set_attribute(g, "no_border_vertex_out_degree", None)
        ref = np.asarray(((2, 3, 2),
                          (2, 3, 2)))
        res = hg.attribute_vertex_perimeter(g)
        self.assertTrue(np.allclose(ref, res))
    def test_vertex_perimeter_rag(self):
        """Vertex perimeter of RAG regions (sum of incident edge lengths)."""
        g = hg.get_4_adjacency_graph((2, 3))
        vertex_labels = np.asarray(((1, 2, 2),
                                    (3, 3, 3)))
        rag = hg.make_region_adjacency_graph_from_labelisation(g, vertex_labels)
        ref = np.asarray((2, 3, 3))
        res = hg.attribute_vertex_perimeter(rag)
        self.assertTrue(np.allclose(ref, res))
    def test_attribute_vertex_list(self):
        """Per-node list of the leaves contained in the node's region."""
        tree, altitudes = TestAttributes.get_test_tree()
        res = hg.attribute_vertex_list(tree)
        ref = [[0], [1], [2], [3], [4], [5], [6], [7], [8],
               [0, 1], [2, 5], [3, 4], [6, 7], [6, 7, 8],
               [0, 1, 2, 5], [0, 1, 2, 5, 6, 7, 8],
               [0, 1, 2, 5, 6, 7, 8, 3, 4]]
        self.assertTrue(len(ref) == len(res))
        # Compare as sets: leaf enumeration order is not specified.
        for i in range(len(ref)):
            self.assertTrue(set(ref[i]) == set(res[i]))
    def test_attribute_gaussian_region_weights_model_scalar(self):
        """Per-node mean/variance of random scalar float leaf weights."""
        tree, altitudes = TestAttributes.get_test_tree()
        vertex_list = hg.attribute_vertex_list(tree)
        np.random.seed(42)
        vertex_weights = np.random.rand(tree.num_leaves())
        mean, variance = hg.attribute_gaussian_region_weights_model(tree, vertex_weights)
        # Cross-check each internal node against numpy's mean/var.
        for i in tree.leaves_to_root_iterator():
            m = np.mean(vertex_weights[vertex_list[i]])
            v = np.var(vertex_weights[vertex_list[i]])
            self.assertTrue(np.isclose(m, mean[i]))
            self.assertTrue(np.isclose(v, variance[i]))
    def test_attribute_gaussian_region_weights_model_scalar_int(self):
        """Same as the scalar test but with integer leaf weights."""
        tree, altitudes = TestAttributes.get_test_tree()
        vertex_list = hg.attribute_vertex_list(tree)
        np.random.seed(42)
        vertex_weights = np.random.randint(255, size=tree.num_leaves())
        mean, variance = hg.attribute_gaussian_region_weights_model(tree, vertex_weights)
        for i in tree.leaves_to_root_iterator():
            m = np.mean(vertex_weights[vertex_list[i]])
            v = np.var(vertex_weights[vertex_list[i]])
            self.assertTrue(np.isclose(m, mean[i]))
            self.assertTrue(np.isclose(v, variance[i]))
    def test_attribute_gaussian_region_weights_model_vectorial(self):
        """Mean vector / covariance matrix for 3-channel leaf weights."""
        tree, altitudes = TestAttributes.get_test_tree()
        vertex_list = hg.attribute_vertex_list(tree)
        np.random.seed(42)
        vertex_weights = np.random.rand(tree.num_leaves(), 3)
        mean, variance = hg.attribute_gaussian_region_weights_model(tree, vertex_weights)
        for i in tree.leaves_to_root_iterator(include_leaves=True):
            m = np.mean(vertex_weights[vertex_list[i]], 0)
            self.assertTrue(np.allclose(m, mean[i, :]))
            # numpy wrongly interprets a single observation with several variables as
            # multiple observations of a single variables
            if i >= tree.num_leaves():
                v = np.cov(vertex_weights[vertex_list[i]], rowvar=False, bias=True)
                self.assertTrue(np.allclose(v, variance[i, ...]))
            else:
                # A single-leaf region has zero covariance by definition.
                v = np.zeros_like(variance[i, ...])
                self.assertTrue(np.allclose(v, variance[i, ...]))
    def test_tree_attribute_extrema(self):
        """Marks nodes that are local extrema of the altitudes."""
        t = hg.Tree((11, 11, 9, 9, 8, 8, 13, 13, 10, 10, 12, 12, 14, 14, 14))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 4, 8, 10.))
        ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0))
        res = hg.attribute_extrema(t, altitudes)
        self.assertTrue(np.all(ref == res))
    def test_tree_attribute_extrema2(self):
        """Extrema of a max tree correspond to the image's local maxima."""
        graph = hg.get_4_adjacency_implicit_graph((4, 4))
        vertex_weights = np.asarray((0, 1, 4, 4,
                                     7, 5, 6, 8,
                                     2, 3, 4, 1,
                                     9, 8, 6, 7))
        tree, altitudes = hg.component_tree_max_tree(graph, vertex_weights)
        extrema = hg.attribute_extrema(tree, altitudes)
        expected_extrema = np.asarray((0, 0, 0, 0,
                                       0, 0, 0, 0,
                                       0, 0, 0, 0,
                                       0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0))
        self.assertTrue(np.all(expected_extrema == extrema))
    def test_attribute_extinction_value(self):
        """Extinction values; increasing flag given as bool and as string."""
        # same as dynamics
        t = hg.Tree((8, 8, 9, 7, 7, 11, 11, 9, 10, 10, 12, 12, 12))
        altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 8, 10.))
        attribute = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 3, 2, 4, 2, 10.))
        ref = np.asarray((3, 3, 0, 10, 10, 2, 2, 10, 3, 10, 10, 2, 10))
        res = hg.attribute_extinction_value(t, altitudes, attribute)
        self.assertTrue(np.all(ref == res))
        res = hg.attribute_extinction_value(t, altitudes, attribute, True)
        self.assertTrue(np.all(ref == res))
        res = hg.attribute_extinction_value(t, altitudes, attribute, "increasing")
        self.assertTrue(np.all(ref == res))
    def test_attribute_extinction_value2(self):
        """Area extinction on a max tree (decreasing altitude ordering)."""
        graph = hg.get_4_adjacency_implicit_graph((4, 4))
        vertex_weights = np.asarray((0, 1, 4, 4,
                                     7, 5, 6, 8,
                                     2, 3, 4, 1,
                                     9, 8, 6, 7))
        tree, altitudes = hg.component_tree_max_tree(graph, vertex_weights)
        area = hg.attribute_area(tree)
        expected_ext = np.asarray((0, 0, 0, 0,
                                   1, 0, 0, 4,
                                   0, 0, 0, 0,
                                   16, 0, 0, 1,
                                   16, 16, 4, 1, 1, 16, 4, 4, 16, 16, 16, 16, 16))
        ext = hg.attribute_extinction_value(tree, altitudes, area)
        self.assertTrue(np.all(expected_ext == ext))
        ext = hg.attribute_extinction_value(tree, altitudes, area, False)
        self.assertTrue(np.all(expected_ext == ext))
        ext = hg.attribute_extinction_value(tree, altitudes, area, "decreasing")
        self.assertTrue(np.all(expected_ext == ext))
def test_attribute_height_inc(self):
t = hg.Tree((7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11))
altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 3, 2, 1, 5, 8.))
ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 2, 3, 7, 6, 7))
res = hg.attribute_height(t, altitudes)
self.assertTrue(np.all(res == ref))
res = hg.attribute_height(t, altitudes, True)
self.assertTrue(np.all(res == ref))
res = hg.attribute_height(t, altitudes, "increasing")
self.assertTrue(np.all(res == ref))
def test_attribute_height_dec(self):
t = hg.Tree((7, 7, 8, 8, 8, 9, 9, 10, 10, 11, 11, 11))
altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 8, 5, 9, 4, 1.))
ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 4, 1, 8, 7, 8))
res = hg.attribute_height(t, altitudes)
self.assertTrue(np.all(res == ref))
res = hg.attribute_height(t, altitudes, False)
self.assertTrue(np.all(res == ref))
res = hg.attribute_height(t, altitudes, "decreasing")
self.assertTrue(np.all(res == ref))
def test_attribute_dynamics(self):
t = hg.Tree((8, 8, 9, 7, 7, 11, 11, 9, 10, 10, 12, 12, 12))
altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 4, 8, 10.))
ref = np.asarray((3, 3, 0, 10, 10, 2, 2, 10, 3, 10, 10, 2, 10))
res = hg.attribute_dynamics(t, altitudes)
self.assertTrue(np.all(res == ref))
def test_attribute_dynamics2(self):
t = hg.Tree((11, 11, 9, 9, 8, 8, 13, 13, 10, 10, 12, 12, 14, 14, 14))
altitudes = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 4, 8, 10.))
ref = np.asarray((3, 3, 0, 0, 10, 10, 2, 2, 10, 0, 10, 3, 10, 2, 10))
res = hg.attribute_dynamics(t, altitudes)
self.assertTrue(np.all(res == ref))
def test_attribute_child_number(self):
tree = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
ref = np.asarray((0, 1, 0, 1, 2, 0, 1, -1), dtype=np.int64)
res = hg.attribute_child_number(tree)
self.assertTrue(np.all(res == ref))
    def test_attribute_children_pair_sum_product_scalar(self):
        # Scalar weights: each internal node gets the sum over pairs of its
        # children of the product of the children's weights
        # (node 5: 2*3 = 6; node 6: 4*5 + 4*6 + 5*6 = 74; node 7: 7*8 = 56).
        tree = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
        node_weights = np.asarray((2, 3, 4, 5, 6, 7, 8, 9))
        res = hg.attribute_children_pair_sum_product(tree, node_weights)
        ref = np.asarray((0, 0, 0, 0, 0, 6, 74, 56))
        self.assertTrue(np.allclose(ref, res))
def test_attribute_children_pair_sum_product_scalar(self):
tree = hg.Tree((5, 5, 6, 6, 6, 7, 7, 7))
node_weights = np.asarray((2, 9, 3, 8, 4, 7, 5, 6, 6, 5, 7, 4, 8, 3, 9, 2)).reshape((8, 2))
res = hg.attribute_children_pair_sum_product(tree, node_weights)
ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 72, 74, 107, 56, 12)).reshape((8, 2))
self.assertTrue(np.allclose(ref, res))
    def test_attribute_tree_sampling_probability_edge_model(self):
        # 'edge' model: reference probabilities are per-node edge-weight
        # totals normalised by the total edge weight; leaves get 0.
        g = hg.get_4_adjacency_graph((3, 3))
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 2, 2))
        tree, altitudes = hg.quasi_flat_zone_hierarchy(g, edge_weights)
        res = hg.attribute_tree_sampling_probability(tree, g, edge_weights, model='edge')
        ref = np.asarray((0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 4, 3, 26)) / np.sum(edge_weights)
        self.assertTrue(np.allclose(ref, res))
    def test_attribute_tree_sampling_probability_null_model(self):
        g = hg.get_4_adjacency_graph((3, 3))
        edge_weights = np.asarray((0, 6, 2, 6, 0, 0, 5, 4, 5, 3, 2, 2))
        tree, altitudes = hg.quasi_flat_zone_hierarchy(g, edge_weights)
        res = hg.attribute_tree_sampling_probability(tree, g, edge_weights, model='null')
        Z = np.sum(edge_weights)
        # Reference: per internal node, a sum of pairwise products of vertex
        # weights, normalised by Z^2; the 9 leaves get probability 0.
        # NOTE(review): the exact pairing rule is inferred from the products
        # below -- confirm against higra's documentation.
        ref = np.asarray(
            (0, 0, 0, 0, 0, 0, 0, 0, 0,
             6 * 8, 2 * 7,
             11 * 15,
             6 * 2 + 6 * 7 + 8 * 2 + 8 * 7,
             7 * 9 + 7 * 5 + 9 * 5,
             6 * 7 + 6 * 9 + 6 * 5 + 8 * 7 + 8 * 9 + 8 * 5 + 2 * 7 + 2 * 9 + 2 * 5 + 7 * 7 + 7 * 9 + 7 * 5,
             6 * 11 + 6 * 15 + 8 * 11 + 8 * 15 + 2 * 11 + 2 * 15 + 11 * 7 + 11 * 7 + 11 * 9 + 11 * 5 + 15 * 7 + 15 * 7 + 15 * 9 + 15 * 5)) / \
            (Z * Z)
        self.assertTrue(np.allclose(ref, res))
def test_topological_height(self):
tree = hg.Tree((6, 6, 9, 9, 7, 8, 10, 8, 10, 10, 10))
res = hg.attribute_topological_height(tree)
ref = (0, 0, 0, 0, 0, 0, 1, 1, 2, 1, 3)
self.assertTrue(np.array_equal(res, ref))
def test_moment_of_inertia(self):
"""
The test image is a binary image of 5x15 pixels:
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 1 0 0 1 0 0 0 0 0 1 1 1 0
0 1 1 1 0 1 0 1 1 1 0 1 1 1 0
0 0 1 0 0 1 0 0 0 0 0 1 1 1 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
The max-tree of this image is composed of five non-leaf nodes:
the root and the four maxima (a cross, a vertical line, a horizontal line and a square)
Using the formula given in https://en.wikipedia.org/wiki/Image_moment, the moment of inertia of each shape is:
- Cross: 0.16
- Square: 0.1481
- Vertical and horizontal lines: 0.2222
- Rectangle (root): 0.2756
The moment of inertia of the leaf nodes are set to 0.0
"""
graph = hg.get_4_adjacency_implicit_graph((5, 15))
vertex_weights = np.asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
tree, altitudes = hg.component_tree_max_tree(graph, vertex_weights)
res = hg.attribute_moment_of_inertia(tree)
ref = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1481, 0.2222, 0.16, 0.2222, 0.2756)
self.assertTrue(np.allclose(res,ref,atol=0.0001))
# Run the whole test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 42.793462 | 142 | 0.513785 |
53e6e342c1b77e3539f650414a90bcb7ec4ead81
| 5,302 |
py
|
Python
|
Peer_Node.py
|
ck-chaudhary/Auto-PUFChain-An-Automated-Interaction-Tool-for-PUFs-and-Blockchain-in-Electronic-Supply-Chain
|
e3916a5b3c4d66e437bc78c5f2e866dbebd04317
|
[
"MIT"
] | null | null | null |
Peer_Node.py
|
ck-chaudhary/Auto-PUFChain-An-Automated-Interaction-Tool-for-PUFs-and-Blockchain-in-Electronic-Supply-Chain
|
e3916a5b3c4d66e437bc78c5f2e866dbebd04317
|
[
"MIT"
] | null | null | null |
Peer_Node.py
|
ck-chaudhary/Auto-PUFChain-An-Automated-Interaction-Tool-for-PUFs-and-Blockchain-in-Electronic-Supply-Chain
|
e3916a5b3c4d66e437bc78c5f2e866dbebd04317
|
[
"MIT"
] | null | null | null |
# Code for Peer Node interfacing
#
# Connects to a local Ethereum JSON-RPC node and binds the supply-chain
# contract whose address is read from contract_address.txt.
import json
from web3 import Web3
import time
import os
import sys
# Local blockchain endpoint (port 7545 -- presumably a Ganache instance;
# confirm against the deployment setup).
node_url = 'HTTP://127.0.0.1:7545'
w3 = Web3(Web3.HTTPProvider(node_url))
print("connected = ",w3.isConnected())
# This peer signs its transactions with account #9 of the node.
w3.eth.defaultAccount = w3.eth.accounts[9]
print(w3.eth.blockNumber)
# Contract address -- presumably written by the deployment step; verify.
contract_address_file = open("contract_address.txt","r")
cont = contract_address_file.read()
print(cont)
contract_address = w3.toChecksumAddress(cont)
abi = json.loads('[{"constant":true,"inputs":[{"name":"_identifier","type":"uint256"}],"name":"checkOwnership","outputs":[{"name":"_ownerName","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"node","type":"address"}],"name":"KeyEnroll","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"peer","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_identifier","type":"uint256"}],"name":"authenticateDevice","outputs":[{"name":"_challenge","type":"int256[]"},{"name":"_response","type":"uint256[]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_identifier","type":"uint256"},{"name":"buyer","type":"address"}],"name":"transferOwnership","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":false,"inputs":[{"name":"_identifier","type":"uint256"},{"name":"_owner","type":"address"},{"name":"_challenge","type":"int256"},{"name":"_response","type":"uint256"}],"name":"registerDevice","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"TA","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"consortium","outputs":[{"name":"","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"node","type":"address"}],"name":"addNode","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"idInfo","outputs":[{"name":"owner","type":"address"}],"payable":false,"stateMutability":"view","type":"function"},{"inputs":[],"payable":false,"stateMutability":"n
onpayable","type":"constructor"}]');
deployed_contract = w3.eth.contract(address= contract_address, abi= abi)
print(deployed_contract.address)
# Interactive menu loop. Exits on an empty input (falsy string) or an
# explicit choice of "4" (Exit/Quit).
choice = True
while choice:
    # NOTE: the menu originally listed "Exit/Quit" as a second option "3"
    # while the handler below checks for "4"; the label is corrected here.
    print("""
    1.Check Ownership
    2.Authenticate device
    3.Transfer Ownership
    4.Exit/Quit
    """)
    choice = input("What would you like to do? ")
    if choice == "1":
        # Query the on-chain owner of a device.
        device_id = int(input("Enter the unique id for the device: "))
        owner = deployed_contract.functions.checkOwnership(device_id).call()
        print("Owner of device id:", device_id, "is ", owner)
    elif choice == "2":
        # Challenge-response authentication of the device's PUF:
        # fetch enrolled CRPs from the contract, replay the challenges on
        # the physical board and compare the responses.
        device_id = int(input("Enter the device id you would like to authenticate: "))
        y = deployed_contract.functions.authenticateDevice(device_id).call()
        print(len(y[0]))
        for i in range(len(y[0])):
            print(y[0][i], y[1][i])
        board = input("Enter the board number to which the device is connected: ")
        file2_name = 'chal_from_device_' + str(board) + '.txt'
        file_chal = open(file2_name, "a")
        # Write the on-chain challenges (zero-padded to 64 digits) for the
        # PUF driver script.
        for i in range(len(y[0])):
            file_chal.write(str(y[0][i]).zfill(64) + '\n')
        command2 = 'sudo python APUF_64_1.py ' + str(board) + ' ' + file2_name
        file_chal.close()
        os.system(command2)
        # response of chal_10_device
        resp = 'a_puf_golden_' + str(board) + '.txt'
        res = []
        with open(resp) as f1:
            res = f1.read().split('\n')
        # Authenticating the chal-resp pairs
        flag = 0
        print(y)
        # Last split element is the empty string after the final newline.
        test = [int(res[i]) for i in range(len(res) - 1)]
        print(test)
        for i in range(len(res) - 1):
            if test[i] == y[1][i]:
                continue
            else:
                flag = 1
                break
        #print(flag)
        #os.system("sudo rm chal_for_device.txt")
        if (flag == 0) and (len(res) != 0):
            #auth[device_id] = 1
            print("Device authenticated !!! ,proceed for ownership transfer")
        else:
            print("Device authentication failed !")
    elif choice == "3":
        device_id = int(input("Enter the device id you would like to check ownership of: "))
        buyer = input("Whom do you want to transfer the ownership to: ")
        # Transferring the ownership to authenticated peer
        print("Previous Owner:", deployed_contract.functions.checkOwnership(device_id).call())
        tx_hash = deployed_contract.functions.transferOwnership(device_id, buyer).transact()
        #print(tx_hash)
        print("Current Owner:", deployed_contract.functions.checkOwnership(device_id).call())
        print("\n Ownership Transferred Successfully !!!")
    elif choice == "4":
        print("\n Goodbye")
        break
    elif choice != "":
        print("\n Not Valid Choice Try again")
c872803d7e248ff842d86669bd9fc9a80ff64c65
| 396,325 |
py
|
Python
|
test/test_linalg.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 1 |
2022-01-20T03:49:23.000Z
|
2022-01-20T03:49:23.000Z
|
test/test_linalg.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | 14 |
2021-10-14T06:58:50.000Z
|
2021-12-17T11:51:07.000Z
|
test/test_linalg.py
|
vuanvin/pytorch
|
9267fd8d7395074001ad7cf2a8f28082dbff6b0b
|
[
"Intel"
] | null | null | null |
# -*- coding: utf-8 -*-
# Owner(s): ["module: linear algebra"]
import torch
import numpy as np
import unittest
import itertools
import warnings
import math
from math import inf, nan, isnan
import random
from random import randrange
from itertools import product
from functools import reduce, partial, wraps
from torch.testing._internal.common_utils import \
(TestCase, run_tests, TEST_SCIPY, IS_MACOS, IS_WINDOWS, slowTest,
TEST_WITH_ASAN, TEST_WITH_ROCM, IS_FBCODE, IS_REMOTE_GPU,
iter_indices, gradcheck, gradgradcheck,
make_fullrank_matrices_with_distinct_singular_values)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, dtypes,
onlyCPU, skipCUDAIf, skipCUDAIfNoMagma, skipCPUIfNoLapack, precisionOverride,
skipCUDAIfNoMagmaAndNoCusolver, skipCUDAIfRocm, onlyNativeDeviceTypes, dtypesIfCUDA,
onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver)
from torch.testing import make_tensor
from torch.testing._internal.common_dtype import (
all_types, floating_types, floating_and_complex_types, get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes,
get_all_fp_dtypes,
)
from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9
from torch.distributions.binomial import Binomial
# Protects against includes accidentally setting the default dtype
# NOTE: jit_metaprogramming_utils sets the default dtype to double!
torch.set_default_dtype(torch.float32)
assert torch.get_default_dtype() is torch.float32
if TEST_SCIPY:
import scipy
def setLinalgBackendsToDefaultFinally(fn):
    """Decorator: run *fn* and always restore the preferred linalg backend.

    Resetting to 'default' in a ``finally`` block keeps a failure inside one
    test from leaking a non-default backend preference into later linalg
    tests.  The wrapped function's return value is propagated (the original
    implementation discarded it).
    """
    @wraps(fn)
    def _fn(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        finally:
            # Set linalg backend back to default to make sure potential failures in one test
            # doesn't affect other linalg tests
            torch.backends.cuda.preferred_linalg_library('default')
    return _fn
class TestLinalg(TestCase):
def setUp(self):
super(self.__class__, self).setUp()
torch.backends.cuda.matmul.allow_tf32 = False
def tearDown(self):
torch.backends.cuda.matmul.allow_tf32 = True
super(self.__class__, self).tearDown()
exact_dtype = True
    @dtypes(torch.float, torch.cfloat)
    @precisionOverride({torch.float: 1e-06, torch.cfloat: 1e-06})
    @tf32_on_and_off(5e-3)
    def test_inner(self, device, dtype):
        """torch.inner matches np.inner for scalar/empty/1D/2D/3D/4D operand
        combinations, including the out= variant, non-contiguous inputs and
        the error raised on mismatched last dimensions."""
        def check(a_sizes_, b_sizes_):
            # Each size pair is exercised in both argument orders.
            for a_sizes, b_sizes in ((a_sizes_, b_sizes_), (b_sizes_, a_sizes_)):
                a = torch.randn(a_sizes, dtype=dtype, device=device)
                b = torch.randn(b_sizes, dtype=dtype, device=device)
                res = torch.inner(a, b)
                ref = np.inner(a.cpu().numpy(), b.cpu().numpy())
                self.assertEqual(res.cpu(), torch.from_numpy(np.array(ref)))
                out = torch.zeros_like(res)
                torch.inner(a, b, out=out)
                self.assertEqual(res, out)
        check([], [])  # scalar x scalar
        check([], [0])  # scalar x empty
        check([], [3])  # scalar x 1D
        check([], [2, 3, 4])  # scalar x 3D
        check([0], [0])  # empty x empty
        check([0], [2, 0])  # empty x 2D
        check([2], [2])  # 1D x 1D
        check([2], [3, 1, 2])  # 1D x 3D
        check([2], [3, 0, 2])  # 1D x 3D empty
        check([1, 2], [3, 2])  # 2D x 2D
        check([1, 2], [3, 4, 2])  # 2D x 3D
        check([2, 1, 3, 2], [1, 3, 2, 2])  # 4D x 4D
        # Test noncontiguous input
        a = torch.randn(3, 2, device=device, dtype=dtype).transpose_(0, 1)
        b = torch.randn(4, 3, device=device, dtype=dtype)[::2, :]
        self.assertFalse(a.is_contiguous() or b.is_contiguous())
        self.assertEqual(a.inner(b).cpu().numpy(), np.inner(a.cpu().numpy(), b.cpu().numpy()))
        # Test error message
        with self.assertRaisesRegex(RuntimeError,
                                    r"inner\(\) the last dimension must match on both "
                                    r"input tensors but got shapes \[2, 3\] and \[2, 2\]"):
            torch.randn(2, 3, device=device, dtype=dtype).inner(torch.randn(2, 2, device=device, dtype=dtype))
# Tests torch.outer, and its alias, torch.ger, vs. NumPy
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*(get_all_dtypes()))
def test_outer(self, device, dtype):
def run_test_case(a, b):
if dtype == torch.bfloat16:
a_np = a.to(torch.double).cpu().numpy()
b_np = b.to(torch.double).cpu().numpy()
exact_dtype = False
else:
a_np = a.cpu().numpy()
b_np = b.cpu().numpy()
exact_dtype = True
expected = np.outer(a_np, b_np)
self.assertEqual(torch.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.outer(a, b), expected, exact_dtype=False)
self.assertEqual(torch.ger(a, b), expected, exact_dtype=False)
self.assertEqual(torch.Tensor.ger(a, b), expected, exact_dtype=False)
# test out variant
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.outer(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
out = torch.empty(a.size(0), b.size(0), device=device, dtype=dtype)
torch.ger(a, b, out=out)
self.assertEqual(out, expected, exact_dtype=False)
a = torch.randn(50).to(device=device, dtype=dtype)
b = torch.randn(50).to(device=device, dtype=dtype)
run_test_case(a, b)
# test 0 strided tensor
zero_strided = torch.randn(1).to(device=device, dtype=dtype).expand(50)
run_test_case(zero_strided, b)
run_test_case(a, zero_strided)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq(self, device, dtype):
        """torch.linalg.lstsq agrees with SciPy/NumPy references across
        drivers, rcond settings, matrix sizes (m>n, m==n, and m<n where
        supported) and batch shapes."""
        from torch.testing._internal.common_utils import random_well_conditioned_matrix
        # CUDA only supports the 'gels' driver; CPU supports all four.
        if self.device_type == 'cpu':
            drivers = ('gels', 'gelsy', 'gelsd', 'gelss', None)
        else:
            drivers = ('gels', None)
        def check_solution_correctness(a, b, sol):
            # Sanity check against the pseudo-inverse solution.
            sol2 = a.pinverse() @ b
            self.assertEqual(sol, sol2, atol=1e-5, rtol=1e-5)
        def check_correctness_ref(a, b, res, ref, driver="default"):
            # Compare `res` (solution/residuals/rank/singular_values) with a
            # non-batched reference `ref` applied to each matrix of the batch.
            def apply_if_not_empty(t, f):
                if t.numel():
                    return f(t)
                else:
                    return t
            def select_if_not_empty(t, i):
                selected = apply_if_not_empty(t, lambda x: x.select(0, i))
                return selected
            m = a.size(-2)
            n = a.size(-1)
            nrhs = b.size(-1)
            batch_size = int(np.prod(a.shape[:-2]))
            if batch_size == 0:
                batch_size = 1
            a_3d = a.view(batch_size, m, n)
            b_3d = b.view(batch_size, m, nrhs)
            solution_3d = res.solution.view(batch_size, n, nrhs)
            residuals_2d = apply_if_not_empty(res.residuals, lambda t: t.view(-1, nrhs))
            rank_1d = apply_if_not_empty(res.rank, lambda t: t.view(-1))
            singular_values_2d = res.singular_values.view(batch_size, res.singular_values.shape[-1])
            if a.numel() > 0:
                for i in range(batch_size):
                    sol, residuals, rank, singular_values = ref(
                        a_3d.select(0, i).numpy(),
                        b_3d.select(0, i).numpy()
                    )
                    # Singular values are None when lapack_driver='gelsy' in SciPy
                    if singular_values is None:
                        singular_values = []
                    self.assertEqual(sol, solution_3d.select(0, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(rank, select_if_not_empty(rank_1d, i), atol=1e-5, rtol=1e-5)
                    self.assertEqual(singular_values, singular_values_2d.select(0, i), atol=1e-5, rtol=1e-5)
                    # SciPy and NumPy operate only on non-batched input and
                    # return an empty array with shape (0,) if rank(a) != n
                    # in PyTorch the batched inputs are supported and
                    # matrices in the batched input can have different ranks
                    # we compute residuals only if all matrices have rank == n
                    # see https://github.com/pytorch/pytorch/issues/56483
                    if m > n:
                        if torch.all(rank_1d == n):
                            self.assertEqual(
                                residuals, select_if_not_empty(residuals_2d, i), atol=1e-5, rtol=1e-5, exact_dtype=False
                            )
                        else:
                            self.assertTrue(residuals_2d.numel() == 0)
            else:
                # Empty input: only the output shapes can be checked.
                self.assertEqual(res.solution.shape, (*a.shape[:-2], n, nrhs))
                self.assertEqual(res.rank.shape, a.shape[:-2])
                # residuals are not always computed (and have non-zero shape)
                if m > n and driver != "gelsy":
                    self.assertEqual(res.residuals.shape, (*a.shape[:-2], 0))
                else:
                    self.assertEqual(res.residuals.shape, (0, ))
                # singular_values are not always computed (and have non-zero shape)
                if driver == "default" or driver == "gelsd" or driver == "gelss":
                    self.assertEqual(res.singular_values.shape, (*a.shape[:-2], min(m, n)))
                else:
                    self.assertEqual(res.singular_values.shape, (0, ))
        def check_correctness_scipy(a, b, res, driver, cond):
            # SciPy provides 3 driver options: gelsd, gelss, gelsy
            if TEST_SCIPY and driver in ('gelsd', 'gelss', 'gelsy'):
                import scipy.linalg
                def scipy_ref(a, b):
                    return scipy.linalg.lstsq(a, b, lapack_driver=driver, cond=cond)
                check_correctness_ref(a, b, res, scipy_ref, driver=driver)
        def check_correctness_numpy(a, b, res, driver, rcond):
            # NumPy uses only gelsd routine
            if driver == 'gelsd':
                def numpy_ref(a, b):
                    return np.linalg.lstsq(a, b, rcond=rcond)
                check_correctness_ref(a, b, res, numpy_ref)
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_available = (version >= (10, 2))
        ms = [2 ** i for i in range(5)]
        m_ge_n_sizes = [(m, m // 2) for m in ms] + [(m, m) for m in ms]
        # cases m < n are only supported on CPU and for cuSOLVER path on CUDA
        m_l_n_sizes = [(m // 2, m) for m in ms]
        include_m_l_n_case = (cusolver_available or device == 'cpu')
        matrix_sizes = m_ge_n_sizes + (m_l_n_sizes if include_m_l_n_case else [])
        batches = [(), (2,), (2, 2), (2, 2, 2)]
        # we generate matrices with singular values sampled from a normal distribution,
        # that is why we use `cond=1.0`, the mean to cut roughly half of all
        # the singular values and compare whether torch.linalg.lstsq agrees with
        # SciPy and NumPy.
        # if rcond is True then set value for it based on the used algorithm
        # rcond == -1 or any other negative value forces LAPACK to use machine precision tolerance
        rconds = (None, True, -1)
        for batch, matrix_size, driver, rcond in itertools.product(batches, matrix_sizes, drivers, rconds):
            # keep the rcond value if it is None or -1, set the driver specific value if it is True
            if rcond and rcond != -1:
                if driver in ('gelss', 'gelsd'):
                    # SVD based algorithm; set to zero roughly half of all the singular values
                    rcond = 1.0
                else:
                    # driver == 'gelsy'
                    # QR based algorithm; setting the value too high might lead to non-unique solutions and flaky tests
                    rcond = 1e-4
            # specifying rcond value has no effect for gels driver so no need to run the tests again
            if driver == 'gels' and rcond is not None:
                continue
            shape = batch + matrix_size
            a = random_well_conditioned_matrix(*shape, dtype=dtype, device=device)
            b = torch.rand(*shape, dtype=dtype, device=device)
            m = a.size(-2)
            n = a.size(-1)
            res = torch.linalg.lstsq(a, b, rcond=rcond, driver=driver)
            sol = res.solution
            # Only checks gelsd, gelss, gelsy drivers
            check_correctness_scipy(a, b, res, driver, rcond)
            # Only checks gelsd driver
            check_correctness_numpy(a, b, res, driver, rcond)
            # gels driver is not checked by comparing to NumPy or SciPy implementation
            # because NumPy and SciPy do not implement this driver
            if driver == 'gels' and rcond is None:
                check_solution_correctness(a, b, sol)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq_batch_broadcasting(self, device, dtype):
        """lstsq broadcasts batch dimensions of `a` against `b`; results are
        checked against the pseudo-inverse solution."""
        from torch.testing._internal.common_utils import random_well_conditioned_matrix
        def check_correctness(a, b):
            sol = torch.linalg.lstsq(a, b).solution
            sol2 = a.pinverse() @ b
            self.assertEqual(sol, sol2, rtol=1e-5, atol=1e-5)
        ms = [2 ** i for i in range(5)]
        batches = [(), (0,), (2,), (2, 2), (2, 2, 2)]
        # the case when a single matrix is batch-broadcasted over the rhs
        for m, batch in itertools.product(ms, batches):
            a = random_well_conditioned_matrix(m, m, dtype=dtype, device=device).view(*([1] * len(batch)), m, m)
            b = torch.rand(*(batch + (m, m)), dtype=dtype, device=device)
            check_correctness(a, b)
        # cases with broadcastable shapes
        for m in ms:
            a = random_well_conditioned_matrix(1, 3, 1, 3, m, m, dtype=dtype, device=device)
            b = torch.rand(3, 1, 3, 1, m, m // 2, dtype=dtype, device=device)
            check_correctness(a, b)
            # rhs are vectors, not matrices in this test
            b = torch.rand(3, 1, 3, 1, m, dtype=dtype, device=device)
            # unsqueeze for b because `check_correctness` checks against
            # a.pinverse() @ b, which requires b to be a matrix
            check_correctness(a, b.unsqueeze(-1))
            a = random_well_conditioned_matrix(3, 1, 3, 1, m, m, dtype=dtype, device=device)
            b = torch.rand(1, 3, 1, 3, m, m // 2, dtype=dtype, device=device)
            check_correctness(a, b)
            # rhs are vectors, not matrices in this test
            b = torch.rand(1, 3, 1, 3, m, dtype=dtype, device=device)
            check_correctness(a, b.unsqueeze(-1))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_lstsq_input_checks(self, device, dtype):
        """Empty-input shapes and every documented error path of lstsq."""
        # check empty inputs
        # empty batches
        a = torch.rand(0, 0, 3, 3, dtype=dtype, device=device)
        b = torch.rand(0, 0, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(0, 0, 3, 2, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 0, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a and b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 0, dtype=dtype, device=device)
        )
        # empty a but not b
        a = torch.rand(2, 2, 3, 0, dtype=dtype, device=device)
        b = torch.rand(2, 2, 3, 2, dtype=dtype, device=device)
        self.assertEqual(
            torch.linalg.lstsq(a, b)[0],
            torch.zeros(2, 2, 0, 2, dtype=dtype, device=device)
        )
        # empty a and b
        if torch.device(device).type == 'cpu':
            # only CPU since CUDA does not support overdetermined systems
            a = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            b = torch.rand(2, 2, 0, 3, dtype=dtype, device=device)
            self.assertEqual(
                torch.linalg.lstsq(a, b)[0],
                torch.zeros(2, 2, 3, 3, dtype=dtype, device=device)
            )
        # Dimension / shape mismatch errors.
        a = torch.rand(2, 3, dtype=dtype, device=device)
        b = torch.rand(3, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, 'input must have at least 2 dimensions'):
            torch.linalg.lstsq(b, b)
        with self.assertRaisesRegex(RuntimeError, 'other must have at least 1 dimension'):
            torch.linalg.lstsq(a, torch.tensor(1, dtype=dtype, device=device))
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-1\)'):
            torch.linalg.lstsq(a, b)
        with self.assertRaisesRegex(RuntimeError, r'input.size\(-2\) should match other.size\(-2\)'):
            torch.linalg.lstsq(a, b.unsqueeze(-1))
        def complement_device(device):
            # A device different from `device`, to trigger the mismatch error.
            if device == 'cpu' and torch.cuda.is_available():
                return 'cuda'
            else:
                return 'cpu'
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=complement_device(device))
        if a.device != b.device:
            with self.assertRaisesRegex(RuntimeError, 'be on the same device'):
                torch.linalg.lstsq(a, b)
        b = (torch.rand(2, 2, 2, dtype=dtype, device=device) * 100).long()
        with self.assertRaisesRegex(RuntimeError, 'the same dtype'):
            torch.linalg.lstsq(a, b)
        a = torch.rand(2, 2, 2, 2, dtype=dtype, device=device)
        b = torch.rand(2, 2, 2, dtype=dtype, device=device)
        if device != 'cpu':
            with self.assertRaisesRegex(RuntimeError, '`driver` other than `gels` is not supported on CUDA'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        # if on cpu
        else:
            with self.assertRaisesRegex(RuntimeError, r'parameter `driver` should be one of \(gels, gelsy, gelsd, gelss\)'):
                torch.linalg.lstsq(a, b, driver='fictitious_driver')
        # cuSOLVER path supports underdetermined systems
        version = torch.testing._internal.common_cuda._get_torch_cuda_version()
        cusolver_not_available = (version < (10, 1))
        if device != 'cpu' and cusolver_not_available:
            a = torch.rand(2, 3, dtype=dtype, device=device)
            b = torch.rand(2, 1, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, r'only overdetermined systems'):
                torch.linalg.lstsq(a, b)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky(self, device, dtype):
        """torch.linalg.cholesky matches np.linalg.cholesky over shapes,
        batches and (non-)contiguous inputs; also checks out= and upper=."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        def run_test(shape, batch, contiguous):
            A = random_hermitian_pd_matrix(shape, *batch, dtype=dtype, device=device)
            if A.numel() > 0 and not contiguous:
                # Transposing keeps A Hermitian PD but makes it non-contiguous.
                A = A.mT
                self.assertFalse(A.is_contiguous())
            expected_L = np.linalg.cholesky(A.cpu().numpy())
            actual_L = torch.linalg.cholesky(A)
            # For fp32 individual entries in matrices can differ between PyTorch and NumPy
            # Let's compare the norms of matrices instead
            if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
                # axis is specified to calculate matrix norm for batched input
                expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
                actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
                # Compare the norms with standard tolerances
                self.assertEqual(actual_norm, expected_norm)
                # and individual values with a higher tolerance
                self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
            else:
                self.assertEqual(actual_L, expected_L)
        shapes = (0, 3, 5)
        batches = ((), (3, ), (2, 2))
        larger_input_case = [(100, (5, ), True)]
        for shape, batch, contiguous in list(itertools.product(shapes, batches, (True, False))) + larger_input_case:
            run_test(shape, batch, contiguous)
        # check the out= variant
        A = random_hermitian_pd_matrix(3, 3, dtype=dtype, device=device)
        out = torch.empty_like(A)
        ans = torch.linalg.cholesky(A, out=out)
        self.assertEqual(ans, out)
        expected = torch.linalg.cholesky(A)
        self.assertEqual(expected, out)
        # check the upper= variant
        expected = torch.linalg.cholesky(A).mH
        actual = torch.linalg.cholesky(A, upper=True)
        self.assertEqual(expected, actual)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_errors_and_warnings(self, device, dtype):
        """Error/warning behavior of torch.linalg.cholesky, cross-checked
        against NumPy where NumPy raises an analogous error."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix
        # cholesky requires the input to be a square matrix or batch of square matrices
        A = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        A = torch.randn(2, 2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Last 2 dimensions of the array must be square'):
            np.linalg.cholesky(A.cpu().numpy())
        # cholesky requires the input to be at least 2 dimensional tensor
        A = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError,
                                    r'1-dimensional array given\. Array must be at least two-dimensional'):
            np.linalg.cholesky(A.cpu().numpy())
        # if the input matrix is not positive definite, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A[-1, -1] = 0  # Now A is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
            torch.linalg.cholesky(A)
        with self.assertRaisesRegex(np.linalg.LinAlgError, r'Matrix is not positive definite'):
            np.linalg.cholesky(A.cpu().numpy())
        # if at least one matrix in the batch is singular, an error should be raised
        A = torch.eye(3, 3, dtype=dtype, device=device)
        A = A.reshape((1, 3, 3))
        A = A.repeat(5, 1, 1)
        A[4, -1, -1] = 0  # Now A[4] is not positive definite
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 4\): The factorization could not be completed'):
            torch.linalg.cholesky(A)
        # if out tensor with wrong shape is passed a warning is given
        A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
        out = torch.empty(2, 3, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.cholesky(A, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(*A.shape, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.cholesky(A, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
                torch.linalg.cholesky(A, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_cholesky_hermitian_grad(self, device, dtype):
# Check that the gradient is Hermitian (or symmetric)
def run_test(shape):
root = torch.rand(*shape, dtype=dtype, device=device)
root = torch.matmul(root, root.mH)
root.requires_grad_()
chol = torch.linalg.cholesky(root).sum().backward()
self.assertEqual(root.grad, root.grad.mH)
shapes = ((3, 3), (1, 1, 3, 3))
for shape in shapes:
run_test(shape)
    # NOTE: old_cholesky* tests were moved here from test_torch.py and test_autograd.py
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_old_cholesky_batched_many_batches(self, device, dtype):
        """Stress torch.cholesky with very large batch counts of 2x2 matrices."""
        from torch.testing._internal.common_utils import random_symmetric_pd_matrix
        def cholesky_test_helper(n, batchsize, device, upper):
            A = random_symmetric_pd_matrix(n, batchsize, dtype=dtype, device=device)
            chol_fact = torch.cholesky(A, upper=upper)
            if upper:
                # Correctness check
                self.assertEqual(A, chol_fact.mT.matmul(chol_fact))
                # Upper triangular check
                self.assertEqual(chol_fact, chol_fact.triu())
            else:
                # Correctness check
                self.assertEqual(A, chol_fact.matmul(chol_fact.mT))
                # Lower triangular check
                self.assertEqual(chol_fact, chol_fact.tril())
        for upper, batchsize in itertools.product([True, False], [262144, 524288]):
            cholesky_test_helper(2, batchsize, device, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
def cholesky_test_helper(n, batch_dims, upper):
A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
cholesky_exp = torch.stack([m.cholesky(upper=upper) for m in A.reshape(-1, n, n)])
cholesky_exp = cholesky_exp.reshape_as(A)
self.assertEqual(cholesky_exp, torch.cholesky(A, upper=upper))
for upper, batchsize in itertools.product([True, False], [(3,), (3, 4), (2, 3, 4)]):
cholesky_test_helper(3, batchsize, upper)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@tf32_on_and_off(0.01)
def test_old_cholesky(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
A = random_hermitian_pd_matrix(10, dtype=dtype, device=device)
# default Case
C = torch.cholesky(A)
B = torch.mm(C, C.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0)
# test Upper Triangular
U = torch.cholesky(A, True)
B = torch.mm(U.t().conj(), U)
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (upper) did not allow rebuilding the original matrix')
# test Lower Triangular
L = torch.cholesky(A, False)
B = torch.mm(L, L.t().conj())
self.assertEqual(A, B, atol=1e-14, rtol=0, msg='cholesky (lower) did not allow rebuilding the original matrix')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_cholesky_empty(self, device, dtype):
def run_test(upper):
A = torch.empty(0, 0, dtype=dtype, device=device)
chol = torch.cholesky(A, upper)
chol_A = torch.matmul(chol, chol.t().conj())
self.assertEqual(A, chol_A)
for upper in [True, False]:
run_test(upper)
# Test for issue
# https://github.com/pytorch/pytorch/issues/57032
# torch.cholesky with upper=True for batched CUDA inputs was wrong
# it was using the lower triangular part instead of the upper one
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_old_cholesky_batched_upper(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
batchsize = 2
A = random_hermitian_pd_matrix(3, batchsize, dtype=dtype, device=device)
A_triu = A.triu() # fill the lower triangular part with zero
U = torch.cholesky(A_triu, upper=True)
reconstruct_A = U.mH @ U
self.assertEqual(A, reconstruct_A)
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_cholesky_ex(self, device, dtype):
        """Compare torch.linalg.cholesky_ex against np.linalg.cholesky for
        positive definite inputs; `info` must be all zeros on success."""
        from torch.testing._internal.common_utils import random_hermitian_pd_matrix

        def run_test(n, batch):
            A = random_hermitian_pd_matrix(n, *batch, dtype=dtype, device=device)
            expected_L = np.linalg.cholesky(A.cpu().numpy())
            # A successful factorization reports info == 0 for every batch element.
            expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
            actual_L, actual_info = torch.linalg.cholesky_ex(A)

            # For fp32 individual entries in matrices can differ between PyTorch and NumPy
            # Let's compare the norms of matrices instead
            if A.numel() > 0 and dtype in [torch.float32, torch.complex64]:
                # axis is specified to calculate matrix norm for batched input
                expected_norm = np.linalg.norm(expected_L, ord=1, axis=(-2, -1))
                actual_norm = torch.linalg.norm(actual_L, ord=1, axis=(-2, -1))
                # Compare the norms with standard tolerances
                self.assertEqual(actual_norm, expected_norm)
                # and individual values with a higher tolerance
                self.assertEqual(actual_L, expected_L, atol=1e-2, rtol=1e-5)
            else:
                self.assertEqual(actual_L, expected_L)
            self.assertEqual(actual_info, expected_info)

        # n == 0 exercises empty matrices; batch == () the unbatched path.
        ns = (0, 3, 5)
        batches = ((), (2, ), (2, 1))
        for n, batch in itertools.product(ns, batches):
            run_test(n, batch)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_non_pd(self, device, dtype):
# if the input matrix is not positive definite, info with positive integer is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A[-1, -1] = 0 # Now A is singular
_, info = torch.linalg.cholesky_ex(A)
self.assertEqual(info, 3)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'minor of order 3 is not positive-definite'):
torch.linalg.cholesky_ex(A, check_errors=True)
# if at least one matrix in the batch is not positive definite,
# batched info with positive integer for the corresponding matrix is returned
A = torch.eye(3, 3, dtype=dtype, device=device)
A = A.reshape((1, 3, 3))
A = A.repeat(5, 1, 1)
A[3, -2, -2] = 0 # Now A[3] is singular
_, info = torch.linalg.cholesky_ex(A)
expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
expected_info[3] = 2
self.assertEqual(info, expected_info)
with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The factorization could not be completed'):
torch.linalg.cholesky_ex(A, check_errors=True)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_ex_out_info_error(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
# dtype for info must be torch.int32
A = random_hermitian_pd_matrix(3, dtype=dtype, device=device)
L = torch.empty(A.shape, dtype=dtype, device=device)
info = torch.empty(A.shape[:-2], dtype=torch.int64, device=device)
with self.assertRaisesRegex(RuntimeError, "but got info with dtype Long"):
torch.linalg.cholesky_ex(A, out=(L, info))
@onlyCPU
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_old_cholesky_autograd(self, device, dtype):
def func(root, upper):
x = 0.5 * (root + root.mH)
return torch.cholesky(x, upper)
def run_test(upper, dims):
root = torch.rand(*dims, dtype=dtype, device=device, requires_grad=True)
root = root + torch.eye(dims[-1])
gradcheck(func, [root, upper])
gradgradcheck(func, [root, upper])
root = torch.rand(*dims, dtype=dtype, device=device)
root = torch.matmul(root, root.mH)
root.requires_grad_()
chol = root.cholesky().sum().backward()
self.assertEqual(root.grad, root.grad.mH) # Check the gradient is hermitian
for upper, dims in itertools.product([True, False], [(3, 3), (4, 3, 2, 2)]):
run_test(upper, dims)
    def _test_addr_vs_numpy(self, device, dtype, beta=1, alpha=1):
        """Compare torch.addr (beta * M + alpha * outer(a, b)) against a NumPy
        reference, including the out= variant and degenerate inputs.

        bfloat16 is compared via a float64 NumPy reference (NumPy has no
        bfloat16), so exact dtype matching is disabled for it.
        """
        def check(m, a, b, beta, alpha):
            if dtype == torch.bfloat16:
                a_np = a.to(torch.double).cpu().numpy()
                b_np = b.to(torch.double).cpu().numpy()
                m_np = m.to(torch.double).cpu().numpy()
                exact_dtype = False
            else:
                a_np = a.cpu().numpy()
                b_np = b.cpu().numpy()
                m_np = m.cpu().numpy()
                exact_dtype = True
            if beta == 0:
                # With beta == 0 the reference ignores m entirely, so any
                # inf/nan entries in m must not leak into the result.
                expected = alpha * np.outer(a_np, b_np)
            else:
                expected = beta * m_np + alpha * np.outer(a_np, b_np)

            res = torch.addr(m, a, b, beta=beta, alpha=alpha)
            self.assertEqual(res, expected, exact_dtype=exact_dtype)

            # Test out variant
            out = torch.empty_like(res)
            torch.addr(m, a, b, beta=beta, alpha=alpha, out=out)
            self.assertEqual(out, expected, exact_dtype=exact_dtype)

        m = make_tensor((50, 50), device=device, dtype=dtype, low=-2, high=2)
        a = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)
        b = make_tensor((50,), device=device, dtype=dtype, low=-2, high=2)

        check(m, a, b, beta, alpha)

        # test transpose
        m_transpose = torch.transpose(m, 0, 1)
        check(m_transpose, a, b, beta, alpha)

        # test 0 strided tensor
        zero_strided = make_tensor((1,), device=device, dtype=dtype, low=-2, high=2).expand(50)
        check(m, zero_strided, b, beta, alpha)

        # test scalar
        m_scalar = torch.tensor(1, device=device, dtype=dtype)
        check(m_scalar, a, b, beta, alpha)

        # test nans and infs are not propagated to the output when beta == 0
        float_and_complex_dtypes = get_all_fp_dtypes() + get_all_complex_dtypes()
        if beta == 0 and dtype in float_and_complex_dtypes:
            m[0][10] = m[10][10] = m[20][20] = float('inf')
            m[1][10] = m[11][10] = m[21][20] = float('nan')
            check(m, a, b, 0, alpha)
@dtypes(torch.bool)
def test_addr_bool(self, device, dtype):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=True)
self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)
@dtypes(*(get_all_int_dtypes()))
def test_addr_integral(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'argument beta must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2., alpha=1)
with self.assertRaisesRegex(RuntimeError,
'argument alpha must not be a floating point number.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=1.)
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0, alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)
@precisionOverride({torch.bfloat16: 1e-1})
@dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
def test_addr_float_and_complex(self, device, dtype):
with self.assertRaisesRegex(RuntimeError,
'Boolean beta only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=True, alpha=1)
with self.assertRaisesRegex(RuntimeError,
'Boolean alpha only supported for Boolean results.'):
self._test_addr_vs_numpy(device, dtype, beta=2, alpha=True)
# when beta is zero
self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
# when beta is not zero
self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
if dtype in get_all_complex_dtypes():
self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))
@dtypes(*itertools.product(get_all_dtypes(),
get_all_dtypes()))
def test_outer_type_promotion(self, device, dtypes):
a = torch.randn(5).to(device=device, dtype=dtypes[0])
b = torch.randn(5).to(device=device, dtype=dtypes[1])
for op in (torch.outer, torch.Tensor.outer, torch.ger, torch.Tensor.ger):
result = op(a, b)
self.assertEqual(result.dtype, torch.result_type(a, b))
# don't use @dtypes decorator to avoid generating ~1700 tests per device
def test_addr_type_promotion(self, device):
for dtypes0, dtypes1, dtypes2 in product(get_all_dtypes(), repeat=3):
a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
desired_dtype = torch.promote_types(torch.promote_types(dtypes0, dtypes1),
dtypes2)
for op in (torch.addr, torch.Tensor.addr):
result = op(m, a, b)
self.assertEqual(result.dtype, desired_dtype)
# Tests migrated from test_torch.py
# 1) test the shape of the result tensor when there is empty input tensor
# 2) test the Runtime Exception when there is scalar input tensor
def test_outer_ger_addr_legacy_tests(self, device):
for size in ((0, 0), (0, 5), (5, 0)):
a = torch.rand(size[0], device=device)
b = torch.rand(size[1], device=device)
self.assertEqual(torch.outer(a, b).shape, size)
self.assertEqual(torch.ger(a, b).shape, size)
m = torch.empty(size, device=device)
self.assertEqual(torch.addr(m, a, b).shape, size)
m = torch.randn(5, 6, device=device)
a = torch.randn(5, device=device)
b = torch.tensor(6, device=device)
self.assertRaises(RuntimeError, lambda: torch.outer(a, b))
self.assertRaises(RuntimeError, lambda: torch.outer(b, a))
self.assertRaises(RuntimeError, lambda: torch.ger(a, b))
self.assertRaises(RuntimeError, lambda: torch.ger(b, a))
self.assertRaises(RuntimeError, lambda: torch.addr(m, a, b))
self.assertRaises(RuntimeError, lambda: torch.addr(m, b, a))
# Tests torch.det and its alias, torch.linalg.det, vs. NumPy
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double, torch.cdouble)
def test_det(self, device, dtype):
tensors = (
torch.randn((2, 2), device=device, dtype=dtype),
torch.randn((129, 129), device=device, dtype=dtype),
torch.randn((3, 52, 52), device=device, dtype=dtype),
torch.randn((4, 2, 26, 26), device=device, dtype=dtype))
ops = (torch.det, torch.Tensor.det,
torch.linalg.det)
for t in tensors:
expected = np.linalg.det(t.cpu().numpy())
for op in ops:
actual = op(t)
self.assertEqual(actual, expected)
self.compare_with_numpy(op, np.linalg.det, t)
# NOTE: det requires a 2D+ tensor
t = torch.randn(1, device=device, dtype=dtype)
with self.assertRaises(RuntimeError):
op(t)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    def test_eigh(self, device, dtype):
        """Compare torch.linalg.eigh against np.linalg.eigh for Hermitian
        inputs, including the out= variant."""
        from torch.testing._internal.common_utils import random_hermitian_matrix

        def run_test(shape, batch, uplo):
            matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
            expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
            actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
            self.assertEqual(actual_w, expected_w)
            # sign of eigenvectors is not unique and therefore absolute values are compared
            self.assertEqual(abs(actual_v), abs(expected_v))
            # additionally we can multiply the eigenvector with a phase factor e^{i\phi} and then compare the values
            # let's choose the convention that the first element of the eigenvectors from torch and numpy be the same
            # for real inputs, this phase factor is plus or minus one
            if matrix.numel() > 0:
                phase = torch.from_numpy(expected_v[..., 0, :]).to(device=device).div(actual_v[..., 0, :])
                actual_v_rotated = actual_v * phase.unsqueeze(-2).expand_as(actual_v)
                self.assertEqual(actual_v_rotated, expected_v)

            # check the out= variant
            out_w = torch.empty_like(actual_w)
            out_v = torch.empty_like(actual_v)
            ans_w, ans_v = torch.linalg.eigh(matrix, UPLO=uplo, out=(out_w, out_v))
            # the returned values must equal both the out tensors and the
            # results of the plain (non-out) call above
            self.assertEqual(ans_w, out_w)
            self.assertEqual(ans_v, out_v)
            self.assertEqual(ans_w, actual_w)
            self.assertEqual(abs(ans_v), abs(actual_v))

        # shape == 0 exercises empty matrices; batch == () the unbatched path.
        shapes = (0, 3, 5)
        batches = ((), (3, ), (2, 2))
        uplos = ["U", "L"]
        for shape, batch, uplo in itertools.product(shapes, batches, uplos):
            run_test(shape, batch, uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
    def test_eigh_lower_uplo(self, device, dtype):
        """Lower-case 'u'/'l' values must be accepted for the UPLO argument."""
        def run_test(shape, batch, uplo):
            # check lower case uplo
            # use non-symmetric input to check whether uplo argument is working as intended
            # NOTE(review): the dims here are (shape, shape, *batch), which puts
            # `batch` in the trailing (matrix) positions — presumably
            # (*batch, shape, shape) was intended. Harmless as written, since
            # torch and numpy interpret the trailing two dims the same way,
            # but worth confirming the intent.
            matrix = torch.randn(shape, shape, *batch, dtype=dtype, device=device)
            expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
            actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
            self.assertEqual(actual_w, expected_w)
            # eigenvector signs are arbitrary; compare magnitudes only
            self.assertEqual(abs(actual_v), abs(expected_v))

        uplos = ["u", "l"]
        for uplo in uplos:
            run_test(3, (2, 2), uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigh_errors_and_warnings(self, device, dtype):
        """Error/warning behavior of torch.linalg.eigh: non-square input,
        bad UPLO, and mis-shaped/mis-typed/mis-placed out= tensors."""
        from torch.testing._internal.common_utils import random_hermitian_matrix

        # eigh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigh(t)

        # eigh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
                torch.linalg.eigh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
                np.linalg.eigh(t.cpu().numpy(), UPLO=uplo)

        # if non-empty out tensor with wrong shape is passed a warning is given
        a = random_hermitian_matrix(3, dtype=dtype, device=device)
        # eigenvalues of a Hermitian matrix are real, hence the real dtype
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
        out_v = torch.empty(7, 7, dtype=dtype, device=device)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigh(a, out=(out_w, out_v))
            # Check warning occurs: one resize warning per out tensor
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

        # dtypes should be safely castable
        out_w = torch.empty(0, dtype=real_dtype, device=device)
        out_v = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eigh(a, out=(out_w, out_v))

        out_w = torch.empty(0, dtype=torch.int, device=device)
        out_v = torch.empty(0, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigh(a, out=(out_w, out_v))

        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=dtype)
            out_v = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=dtype)
            out_v = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigh(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigh_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(matrix, uplo):
self.assertFalse(matrix.is_contiguous())
expected_w, expected_v = np.linalg.eigh(matrix.cpu().numpy(), UPLO=uplo)
actual_w, actual_v = torch.linalg.eigh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# sign of eigenvectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual_v), abs(expected_v))
def run_test_permuted(shape, batch, uplo):
# check for permuted / transposed inputs
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix.mT
run_test(matrix, uplo)
def run_test_skipped_elements(shape, batch, uplo):
# check for inputs with skipped elements
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix[::2]
run_test(matrix, uplo)
shapes = (3, 5)
batches = ((4, ), (4, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test_permuted(shape, batch, uplo)
run_test_skipped_elements(shape, batch, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_eigh_hermitian_grad(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(dims, uplo):
x = random_hermitian_matrix(dims[-1], *dims[:-2], device=device, dtype=dtype).requires_grad_()
w, v = torch.linalg.eigh(x)
(w.sum() + abs(v).sum()).backward()
self.assertEqual(x.grad, x.grad.mH) # Check the gradient is Hermitian
for dims, uplo in itertools.product([(3, 3), (1, 1, 3, 3)], ["L", "U"]):
run_test(dims, uplo)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(shape, batch, uplo):
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
# check the out= variant
out = torch.empty_like(actual_w)
ans = torch.linalg.eigvalsh(matrix, UPLO=uplo, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, actual_w)
shapes = (0, 3, 5)
batches = ((), (3, ), (2, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test(shape, batch, uplo)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigvalsh_errors_and_warnings(self, device, dtype):
        """Error/warning behavior of torch.linalg.eigvalsh: non-square input,
        bad UPLO, and mis-shaped/mis-typed/mis-placed out= tensors."""
        # eigvalsh requires a square matrix
        t = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvalsh(t)

        # eigvalsh requires 'uplo' parameter to be 'U' or 'L'
        t = torch.randn(3, 3, device=device, dtype=dtype)
        for uplo in ["a", "wrong"]:
            with self.assertRaisesRegex(RuntimeError, "be \'L\' or \'U\'"):
                torch.linalg.eigvalsh(t, UPLO=uplo)
            with self.assertRaisesRegex(ValueError, "be \'L\' or \'U\'"):
                np.linalg.eigvalsh(t.cpu().numpy(), UPLO=uplo)

        # if non-empty out tensor with wrong shape is passed a warning is given
        # eigenvalues of a Hermitian matrix are real, hence the real dtype
        real_dtype = t.real.dtype if dtype.is_complex else dtype
        out = torch.empty_like(t).to(real_dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvalsh(t, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

        # dtypes should be safely castable
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
            torch.linalg.eigvalsh(t, out=out)

        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, device=wrong_device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvalsh(t, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
def test_eigvalsh_non_contiguous(self, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_matrix
def run_test(matrix, uplo):
self.assertFalse(matrix.is_contiguous())
expected_w = np.linalg.eigvalsh(matrix.cpu().numpy(), UPLO=uplo)
actual_w = torch.linalg.eigvalsh(matrix, UPLO=uplo)
self.assertEqual(actual_w, expected_w)
def run_test_permuted(shape, batch, uplo):
# check for permuted / transposed inputs
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix.mT
run_test(matrix, uplo)
def run_test_skipped_elements(shape, batch, uplo):
# check for inputs with skipped elements
matrix = random_hermitian_matrix(shape, *batch, dtype=dtype, device=device)
matrix = matrix[::2]
run_test(matrix, uplo)
shapes = (3, 5)
batches = ((4, ), (4, 2))
uplos = ["U", "L"]
for shape, batch, uplo in itertools.product(shapes, batches, uplos):
run_test_permuted(shape, batch, uplo)
run_test_skipped_elements(shape, batch, uplo)
@dtypes(*floating_and_complex_types())
def test_kron(self, device, dtype):
def run_test_case(a_shape, b_shape):
a = torch.rand(a_shape, dtype=dtype, device=device)
b = torch.rand(b_shape, dtype=dtype, device=device)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
result = torch.kron(a, b)
self.assertEqual(result, expected)
# check the out= variant
out = torch.empty_like(result)
ans = torch.kron(a, b, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
shapes = [(4,), (2, 2), (1, 2, 3), (1, 2, 3, 3)]
for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
run_test_case(a_shape, b_shape)
    @dtypes(*floating_and_complex_types())
    def test_kron_non_contiguous(self, device, dtype):
        """torch.kron with non-contiguous inputs and non-contiguous out= tensors."""
        def run_test_transposed(a_shape, b_shape):
            # check for transposed case
            a = torch.rand(a_shape, dtype=dtype, device=device).mT
            b = torch.rand(b_shape, dtype=dtype, device=device).mT
            self.assertFalse(a.is_contiguous())
            self.assertFalse(b.is_contiguous())
            expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
            result = torch.kron(a, b)
            self.assertEqual(result, expected)

            # check the out= variant with a transposed (non-contiguous) out tensor
            out = torch.empty(result.mT.shape, dtype=dtype, device=device).mT
            self.assertFalse(out.is_contiguous())
            ans = torch.kron(a, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)

        def run_test_skipped_elements(a_shape, b_shape):
            # check for inputs strided to skip every other leading-dim element
            a = torch.rand(2 * a_shape[0], *a_shape[1:], dtype=dtype, device=device)[::2]
            b = torch.rand(2 * b_shape[0], *b_shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(a.is_contiguous())
            self.assertFalse(b.is_contiguous())
            expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
            result = torch.kron(a, b)
            self.assertEqual(result, expected)

            # check the out= variant with a strided (non-contiguous) out tensor
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.kron(a, b, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)

        shapes = [(2, 2), (2, 2, 3), (2, 2, 3, 3)]
        for a_shape, b_shape in itertools.product(shapes, reversed(shapes)):
            # NOTE(review): run_test_transposed is defined above but this call
            # is commented out, so the transposed path is never exercised —
            # confirm whether this was disabled deliberately (e.g. a known
            # failure) or should be re-enabled.
            # run_test_transposed(a_shape, b_shape)
            run_test_skipped_elements(a_shape, b_shape)

        # Test that kron preserves memory format
        a = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
        b = torch.randn(1, 2, 3, 4, dtype=dtype, device=device).contiguous(memory_format=torch.channels_last)
        c = torch.kron(a, b)
        self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
        torch.kron(a, b, out=c)
        self.assertTrue(c.is_contiguous(memory_format=torch.channels_last))
        c = c.contiguous(memory_format=torch.contiguous_format)
        torch.kron(a, b, out=c)
        self.assertTrue(c.is_contiguous(memory_format=torch.contiguous_format))
@dtypes(*floating_and_complex_types())
def test_kron_empty(self, device, dtype):
def run_test_case(empty_shape):
a = torch.eye(3, dtype=dtype, device=device)
b = torch.empty(empty_shape, dtype=dtype, device=device)
result = torch.kron(a, b)
expected = np.kron(a.cpu().numpy(), b.cpu().numpy())
self.assertEqual(result, expected)
# NumPy doesn't work if the first argument is empty
result = torch.kron(b, a)
self.assertEqual(result.shape, expected.shape)
empty_shapes = [(0,), (2, 0), (1, 0, 3)]
for empty_shape in empty_shapes:
run_test_case(empty_shape)
@dtypes(*floating_and_complex_types())
def test_kron_errors_and_warnings(self, device, dtype):
# if non-empty out tensor with wrong shape is passed a warning is given
a = torch.eye(3, dtype=dtype, device=device)
b = torch.ones((2, 2), dtype=dtype, device=device)
out = torch.empty_like(a)
with warnings.catch_warnings(record=True) as w:
# Trigger warning
torch.kron(a, b, out=out)
# Check warning occurs
self.assertEqual(len(w), 1)
self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
# dtypes should match
out = torch.empty_like(a).to(torch.int)
with self.assertRaisesRegex(RuntimeError, "can't be cast to the desired output type"):
torch.kron(a, b, out=out)
    # This test confirms that torch.linalg.norm's dtype argument works
    # as expected, according to the function's documentation
    @skipCUDAIfNoMagma
    def test_norm_dtype(self, device):
        """Exercise torch.linalg.norm's dtype= and out= dtype handling across
        float/double/cfloat/cdouble combinations for vector and matrix ords."""
        def run_test_case(input_size, ord, keepdim, from_dtype, to_dtype):
            # Determine the best dtype to use for comparisons between tensors
            # of two different types
            def get_compare_dtype(type0, type1):
                types_32bit_based = [torch.float, torch.cfloat]
                is_complex = type0.is_complex or type1.is_complex
                if type0 in types_32bit_based or type1 in types_32bit_based:
                    return torch.cfloat if is_complex else torch.float
                else:
                    return torch.cdouble if is_complex else torch.double

            compare_dtype = get_compare_dtype(from_dtype, to_dtype)

            def get_value_type(dtype):
                # Real value type corresponding to a complex dtype.
                if dtype == torch.cfloat:
                    return torch.float
                elif dtype == torch.cdouble:
                    return torch.double
                elif dtype == torch.complex32:
                    return torch.float16
                else:
                    return dtype

            msg = (
                f'input_size={input_size}, ord={ord}, keepdim={keepdim}, '
                f'from_dtype={from_dtype}, to_dtype={to_dtype}')
            input = torch.randn(*input_size, dtype=from_dtype, device=device)
            result = torch.linalg.norm(input, ord, keepdim=keepdim)
            if from_dtype.is_complex:
                # By default, norm downgrades a complex input to the corresponding real number type
                self.assertEqual(result.dtype, get_value_type(from_dtype), msg=msg)
            else:
                self.assertEqual(result.dtype, from_dtype, msg=msg)

            # out= with a different dtype: the out tensor keeps its dtype and
            # the values match the plain call (up to compare_dtype precision).
            result_out = torch.empty((0), dtype=to_dtype, device=device)
            torch.linalg.norm(input, ord, keepdim=keepdim, out=result_out)
            self.assertEqual(result_out.dtype, to_dtype, msg=msg)
            self.assertEqual(result.to(compare_dtype), result_out.to(compare_dtype), msg=msg)

            # dtype= argument: result dtype must be to_dtype.
            result_with_dtype = torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype)
            self.assertEqual(result_with_dtype.dtype, to_dtype, msg=msg)

            if from_dtype.is_complex:
                # Compare against converting the input to to_dtype first.
                result_convert_first = torch.linalg.norm(input.to(to_dtype), ord, keepdim=keepdim)
                self.assertEqual(result_with_dtype.to(compare_dtype), result_convert_first.to(compare_dtype), msg=msg)
            else:
                self.assertEqual(result.to(compare_dtype), result_with_dtype.to(compare_dtype), msg=msg)

            # dtype= combined with an out= tensor of that same dtype.
            result_out_with_dtype = torch.empty_like(result_with_dtype)
            torch.linalg.norm(input, ord, keepdim=keepdim, dtype=to_dtype, out=result_out_with_dtype)
            self.assertEqual(result_out_with_dtype.dtype, to_dtype, msg=msg)
            self.assertEqual(result_with_dtype, result_out_with_dtype, msg=msg)

        ord_vector = [0, 0.1, -0.1, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf, None]
        ord_matrix = ['fro', 'nuc', 1, -1, 2, -2, inf, -inf, None]
        S = 10
        test_cases = [
            ((S, ), ord_vector),
            ((S, S), ord_matrix),
        ]
        for keepdim in [True, False]:
            for input_size, ord_settings in test_cases:
                for ord in ord_settings:
                    # These ords need LAPACK on CPU.
                    if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
                        continue
                    dtypes = [torch.float, torch.double, torch.cfloat, torch.cdouble]
                    for from_dtype, to_dtype in itertools.product(dtypes, dtypes):
                        # complex-to-real via dtype= is skipped here
                        if from_dtype.is_complex and not to_dtype.is_complex:
                            continue
                        run_test_case(input_size, ord, keepdim, from_dtype, to_dtype)

        # Make sure that setting dtype != out.dtype raises an error
        dtype_pairs = [
            (torch.float, torch.double),
            (torch.double, torch.float),
            (torch.cfloat, torch.cdouble),
            (torch.cdouble, torch.cfloat),
        ]
        for keepdim in [True, False]:
            for input_size, ord_settings in test_cases:
                for ord in ord_settings:
                    for dtype, out_dtype in dtype_pairs:
                        input = torch.rand(*input_size)
                        result = torch.tensor([]).to(out_dtype)
                        with self.assertRaisesRegex(RuntimeError, r'provided dtype must match dtype of result'):
                            torch.linalg.norm(input, ord=ord, keepdim=keepdim, dtype=dtype, out=result)
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
    def test_vector_norm(self, device, dtype):
        # This test compares torch.linalg.vector_norm's output with
        # torch.linalg.norm given a flattened tensor
        ord_vector = [0, 0.9, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf]
        input_sizes = [
            (10, ),
            (4, 5),
            (3, 4, 5),
            # the remaining sizes contain zero-length dimensions and exercise
            # the empty-tensor error paths checked inside run_test_case
            (0, ),
            (0, 10),
            (0, 0),
            (10, 0, 10),
        ]
        def vector_norm_reference(input, ord, dim=None, keepdim=False, dtype=None):
            # Reference implementation: vector_norm over all elements equals
            # torch.linalg.norm of the flattened tensor; with keepdim=True and
            # dim=None the scalar result is reshaped back to all-ones dims.
            if dim is None:
                input_maybe_flat = input.flatten(0, -1)
            else:
                input_maybe_flat = input
            result = torch.linalg.norm(input_maybe_flat, ord, dim=dim, keepdim=keepdim, dtype=dtype)
            if keepdim and dim is None:
                result = result.reshape([1] * input.dim())
            return result
        def run_test_case(input, ord, dim, keepdim, norm_dtype):
            # Runs one (input, ord, dim, keepdim, dtype) combination: either the
            # call must match the reference (and the out= variant must agree),
            # or both the reference and vector_norm must raise the same error.
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}, norm_dtype={norm_dtype}'
            error_msg = None
            if input.numel() == 0:
                # empty tensors: negative orders and inf-norms over an empty
                # reduction have no identity element and must error out
                if ord < 0:
                    error_msg = r'linalg.vector_norm of negative order cannot be performed on an empty tensor'
                elif ord == inf and (dim is None or input.size(dim) == 0):
                    error_msg = (
                        r'linalg.vector_norm cannot compute the infinity norm on an empty '
                        r'dimension because the operation does not have an identity')
            if error_msg is None:
                result_dtype_reference = vector_norm_reference(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                result_dtype = torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, dtype=norm_dtype)
                self.assertEqual(result_dtype, result_dtype_reference, msg=msg)
                if norm_dtype is not None:
                    # dtype= must behave like converting the input first
                    result_convert_before = torch.linalg.vector_norm(input.to(norm_dtype), ord, dim=dim, keepdim=keepdim)
                    if norm_dtype.is_complex:
                        result_convert_before = result_convert_before.to(norm_dtype)
                    result_out = torch.empty((0), dtype=norm_dtype, device=device)
                    torch.linalg.vector_norm(input, ord, dtype=norm_dtype, dim=dim, keepdim=keepdim, out=result_out)
                    self.assertEqual(result_convert_before, result_out, msg=msg)
                else:
                    # out= variant must match the functional result
                    result_out = torch.empty((0), dtype=result_dtype.dtype, device=device)
                    torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim, out=result_out)
                    self.assertEqual(result_dtype, result_out, msg=msg)
            else:
                with self.assertRaises(RuntimeError):
                    vector_norm_reference(input, ord, dim=dim, keepdim=keepdim)
                with self.assertRaisesRegex(RuntimeError, error_msg):
                    torch.linalg.vector_norm(input, ord, dim=dim, keepdim=keepdim)
        # complex inputs only accept complex dtype= arguments
        if dtype.is_complex:
            norm_dtypes = [None, torch.cfloat, torch.cdouble]
        else:
            norm_dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]
        for input_size, ord, keepdim, norm_dtype in product(input_sizes, ord_vector, [True, False], norm_dtypes):
            input = make_tensor(input_size, device, dtype, low=-9, high=9)
            # full reduction plus one randomly chosen reduction dim per size
            for dim in [None, random.randint(0, len(input_size) - 1)]:
                run_test_case(
                    input,
                    ord,
                    dim,
                    keepdim,
                    norm_dtype)
def test_vector_norm_dim_tuple_arg(self, device):
test_cases = [
# input size, dim, error, error message
((4, ), (0, ), None, None),
((4, ), (1, ), IndexError, r'Dimension out of range'),
((4, ), (-2, ), IndexError, r'Dimension out of range'),
((4, 3), (0, -1), None, None),
((4, 3), (0, 0), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
((4, 3), (0, -2), RuntimeError, r'dim 0 appears multiple times in the list of dims'),
((4, 3), (0, 1.0), TypeError, r"argument 'dim' must be tuple of ints"),
((4, 3), (None, ), TypeError, r"argument 'dim' must be tuple of ints"),
]
for input_size, dim_tuple, error, error_msg in test_cases:
input = torch.randn(input_size, device=device)
# vector_norm should accept a tuple or a list for dim arg
for dim in [dim_tuple, list(dim_tuple)]:
if error is None:
torch.linalg.vector_norm(input, dim=dim)
else:
with self.assertRaises(error):
torch.linalg.vector_norm(input, dim=dim)
# Test that linalg.vector_norm throws an error if the out tensor's dtype
# does not match the expected output dtype
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble, torch.bfloat16, torch.float16)
def test_vector_norm_out_dtype_error(self, device, dtype):
input = torch.randn(10, device=device, dtype=dtype)
dtypes = [None, torch.float, torch.double, torch.cfloat, torch.cdouble, torch.float16, torch.bfloat16]
for norm_dtype, out_dtype in product(dtypes, dtypes):
if out_dtype is None:
continue
if norm_dtype is None:
if dtype == torch.cfloat:
expected_dtype = torch.float
elif dtype == torch.cdouble:
expected_dtype = torch.double
else:
expected_dtype = dtype
else:
expected_dtype = norm_dtype
result = torch.empty((0), device=device, dtype=out_dtype)
msg = f'norm_dtype: {norm_dtype}, out_dtype: {out_dtype}, expected_dtype: {expected_dtype}'
if dtype.is_complex and norm_dtype is not None and not norm_dtype.is_complex:
with self.assertRaisesRegex(RuntimeError, r"linalg.vector_norm expected complex 'dtype'", msg=msg):
torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
elif out_dtype != expected_dtype:
with self.assertRaisesRegex(RuntimeError, r'linalg.vector_norm expected out tensor dtype', msg=msg):
torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
else:
torch.linalg.vector_norm(input, dtype=norm_dtype, out=result)
# This test compares torch.linalg.norm and numpy.linalg.norm to ensure that
# their vector norm results match
@dtypes(torch.float, torch.double)
def test_norm_vector(self, device, dtype):
def run_test_case(input, p, dim, keepdim):
result = torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
self.assertEqual(result, result_numpy, msg=msg)
result_out = torch.empty_like(result)
torch.linalg.norm(input, ord, dim, keepdim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
ord_vector = [0, 1, -1, 2, -2, 3, -3, 4.5, -4.5, inf, -inf]
S = 10
test_cases = [
# input size, p settings, dim
((S, ), ord_vector, None),
((S, ), ord_vector, 0),
((S, S, S), ord_vector, 0),
((S, S, S), ord_vector, 1),
((S, S, S), ord_vector, 2),
((S, S, S), ord_vector, -1),
((S, S, S), ord_vector, -2),
]
L = 1_000_000
if dtype == torch.double:
test_cases.append(((L, ), ord_vector, None))
for keepdim in [True, False]:
for input_size, ord_settings, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
run_test_case(input, ord, dim, keepdim)
# This test compares torch.linalg.norm, torch.linalg.matrix_norm and numpy.linalg.norm to
# ensure that their matrix norm results match.
@skipMeta # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_norm_matrix(self, device, dtype):
def run_test_case(input, ord, dim, keepdim):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
result = torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
def check(op):
result = op(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
result_out = torch.empty_like(result)
op(input, ord, dim, keepdim, out=result_out)
self.assertEqual(result, result_out, msg=msg)
check(torch.linalg.norm)
if ord is not None and dim is not None:
check(torch.linalg.matrix_norm)
ord_matrix = [1, -1, 2, -2, inf, -inf, 'nuc', 'fro']
S = 10
test_cases = [
# input size, p settings, dim
((S, S), ord_matrix, None),
((S, S), ord_matrix, (0, 1)),
((S, S), ord_matrix, (1, 0)),
((S, S, S, S), ord_matrix, (2, 0)),
((S, S, S, S), ord_matrix, (-1, -2)),
((S, S, S, S), ord_matrix, (-1, -3)),
((S, S, S, S), ord_matrix, (-3, 2)),
]
L = 1_000
if dtype == torch.double:
test_cases.append(((L, L), ord_matrix, None))
for keepdim in [True, False]:
for input_size, ord_settings, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
if self.device_type == 'cpu' and not torch._C.has_lapack and ord in [2, -2, 'nuc']:
continue
run_test_case(input, ord, dim, keepdim)
    @onlyCUDA
    @dtypes(torch.bfloat16, torch.float16)
    def test_norm_fused_type_promotion(self, device, dtype):
        # Checks that norm with dtype=torch.float on a half-precision input is
        # dispatched as a single fused op: the profiler trace must contain the
        # norm op itself but no separate "aten::to" dtype-conversion copy.
        x = torch.randn(10, device=device, dtype=dtype)
        def profile_and_check(fn, x, kwargs, fn_name):
            # ProfilerActivity.CPU records the op dispatch events we inspect
            with torch.profiler.profile(activities=(torch.profiler.ProfilerActivity.CPU,)) as p:
                fn(x, **kwargs, dtype=torch.float)
            # smoke check that profiler returned some events
            self.assertTrue(fn_name in map(lambda e: e.name, p.events()))
            # test that there was no explicit copy
            self.assertFalse("aten::to" in map(lambda e: e.name, p.events()))
        # same check for both the legacy and the linalg entry point
        for f, kwargs, fn_name in zip((torch.norm, torch.linalg.vector_norm), ({"p" : 2}, {}),
                                      ("aten::norm", "aten::linalg_vector_norm")):
            profile_and_check(f, x, kwargs, fn_name)
@skipMeta # https://github.com/pytorch/pytorch/issues/53739
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3})
def test_cond(self, device, dtype):
def run_test_case(input, p):
result = torch.linalg.cond(input, p)
result_numpy = np.linalg.cond(input.cpu().numpy(), p)
self.assertEqual(result, result_numpy, rtol=1e-2, atol=self.precision, exact_dtype=False)
self.assertEqual(result.shape, result_numpy.shape)
# test out= variant
out = torch.empty_like(result)
ans = torch.linalg.cond(input, p, out=out)
self.assertEqual(ans, out)
self.assertEqual(ans, result)
norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
input_sizes = [(32, 32), (2, 3, 3, 3)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in norm_types:
run_test_case(input, p)
# test empty batch sizes
input_sizes = [(0, 3, 3), (0, 2, 5, 5)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in norm_types:
run_test_case(input, p)
# test non-square input
input_sizes = [(16, 32), (32, 16), (2, 3, 5, 3), (2, 3, 3, 5)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in [2, -2, None]:
run_test_case(input, p)
# test for singular input
a = torch.eye(3, dtype=dtype, device=device)
a[-1, -1] = 0 # make 'a' singular
for p in norm_types:
try:
run_test_case(a, p)
except np.linalg.LinAlgError:
# Numpy may fail to converge for some BLAS backends (although this is very rare)
# See the discussion in https://github.com/pytorch/pytorch/issues/67675
pass
# test for 0x0 matrices. NumPy doesn't work for such input, we return 0
input_sizes = [(0, 0), (2, 5, 0, 0)]
for input_size in input_sizes:
input = torch.randn(*input_size, dtype=dtype, device=device)
for p in ['fro', 2]:
expected_dtype = a.real.dtype if dtype.is_complex else dtype
expected = torch.zeros(input_size[:-2], dtype=expected_dtype, device=device)
actual = torch.linalg.cond(input, p)
self.assertEqual(actual, expected)
    @skipMeta # https://github.com/pytorch/pytorch/issues/53739
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3})
    def test_cond_errors_and_warnings(self, device, dtype):
        # Checks the documented error/warning behavior of torch.linalg.cond:
        # dimensionality and squareness requirements, out= resize warnings,
        # out= dtype/device validation, singular batches, and invalid p values.
        norm_types = [1, -1, 2, -2, inf, -inf, 'fro', 'nuc', None]
        # cond expects the input to be at least 2-dimensional
        a = torch.ones(3, dtype=dtype, device=device)
        for p in norm_types:
            with self.assertRaisesRegex(RuntimeError, r'at least 2 dimensions'):
                torch.linalg.cond(a, p)
        # for some norm types cond expects the input to be square
        a = torch.ones(3, 2, dtype=dtype, device=device)
        norm_types = [1, -1, inf, -inf, 'fro', 'nuc']
        for p in norm_types:
            with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
                torch.linalg.cond(a, p)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.ones((2, 2), dtype=dtype, device=device)
        for p in ['fro', 2]:
            # cond returns a real-valued result even for complex inputs
            real_dtype = a.real.dtype if dtype.is_complex else dtype
            out = torch.empty(a.shape, dtype=real_dtype, device=device)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.cond(a, p, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        out = torch.empty(0, dtype=torch.int, device=device)
        for p in ['fro', 2]:
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.cond(a, p, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            for p in ['fro', 2]:
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.cond(a, p, out=out)
        # for batched input if at least one matrix in the batch is not invertible,
        # we can't get the result for all other (possibly) invertible matrices in the batch without an explicit for loop.
        # this should change when at::inverse works with silent errors
        # NumPy works fine in this case because it's possible to silence the error and get the inverse matrix results
        # possibly filled with NANs
        batch_dim = 3
        a = torch.eye(3, 3, dtype=dtype, device=device)
        a = a.reshape((1, 3, 3))
        a = a.repeat(batch_dim, 1, 1)
        a[1, -1, -1] = 0  # now a[1] is singular
        for p in [1, -1, inf, -inf, 'fro', 'nuc']:
            result = torch.linalg.cond(a, p)
            # the singular matrix in the batch must report an infinite condition number
            self.assertEqual(result[1], float('inf'))
        # check invalid norm type
        a = torch.ones(3, 3, dtype=dtype, device=device)
        for p in ['wrong_norm', 5]:
            with self.assertRaisesRegex(RuntimeError, f"linalg.cond got an invalid norm type: {p}"):
                torch.linalg.cond(a, p)
# This test calls torch.linalg.norm and numpy.linalg.norm with illegal arguments
# to ensure that they both throw errors
@dtypes(torch.float, torch.double)
def test_norm_errors(self, device, dtype):
def run_error_test_case(input, ord, dim, keepdim, error_type, error_regex):
test_case_info = (
f'test case input.size()={input.size()}, ord={ord}, dim={dim}, '
f'keepdim={keepdim}, dtype={dtype}')
with self.assertRaisesRegex(error_type, error_regex, msg=test_case_info):
torch.linalg.norm(input, ord, dim, keepdim)
input_numpy = input.cpu().numpy()
msg = f'numpy does not raise error but pytorch does, for case "{test_case_info}"'
with self.assertRaises(Exception, msg=test_case_info):
np.linalg.norm(input_numpy, ord, dim, keepdim)
S = 10
error_test_cases = [
# input size, p settings, dim, error type, error regex
((S, ), ['fro'], None, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
((S, ), ['nuc'], None, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
((S, S), [3.5], None, RuntimeError, r'Order 3.5 not supported for matrix norm'),
((S, S), [0], None, RuntimeError, r'Order 0 not supported for matrix norm'),
((S, S), ['nuc'], 0, RuntimeError, r'order "nuc" can only be used if either len\(dim\) == 2'),
((S, S), ['fro'], 0, RuntimeError, r'order "fro" can only be used if either len\(dim\) == 2'),
((S, S), ['nuc'], (0, 0), RuntimeError, r'duplicate or invalid dimensions'),
((S, S), ['fro', 0], (0, 0), RuntimeError, r'Expected dims to be different'),
((S, S), ['fro', 'nuc', 0], (0, 4), IndexError, r'Dimension out of range'),
((S, ), [0], (4, ), IndexError, r'Dimension out of range'),
((S, ), [None], (0, 0), RuntimeError, r'dim 0 appears multiple times'),
((S, S, S), [1], (0, 1, 2), RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
((S, S, S), [1], None, RuntimeError, r"'dim' must specify 1 or 2 dimensions"),
((S, S), ['garbage'], (0, 1), RuntimeError, r'Invalid norm order: garbage'),
]
for keepdim in [True, False]:
for input_size, ord_settings, dim, error_type, error_regex in error_test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_settings:
run_error_test_case(input, ord, dim, keepdim, error_type, error_regex)
# Test complex number inputs for linalg.norm
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.cfloat, torch.cdouble)
@precisionOverride({torch.cfloat: 2e-4})
def test_norm_complex(self, device, dtype):
def gen_error_message(input_size, ord, keepdim, dim=None):
return "complex norm failed for input size %s, ord=%s, keepdim=%s, dim=%s" % (
input_size, ord, keepdim, dim)
vector_ords = [None, 0, 1, 2, 3, inf, -1, -2, -3, -inf]
matrix_ords = [None, 'fro', 'nuc', 1, 2, inf, -1, -2, -inf]
# Test supported ords
for keepdim in [False, True]:
# vector norm
x = torch.randn(25, device=device, dtype=dtype)
xn = x.cpu().numpy()
for ord in vector_ords:
res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord, keepdims=keepdim)
msg = gen_error_message(x.size(), ord, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, exact_dtype=False)
res_out = torch.tensor([]).to(device)
torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
self.assertEqual(res_out.shape, expected.shape, msg=msg)
self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)
# matrix norm
x = torch.randn(25, 25, device=device, dtype=dtype)
xn = x.cpu().numpy()
for ord in matrix_ords:
res = torch.linalg.norm(x, ord, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, ord, keepdims=keepdim)
msg = gen_error_message(x.size(), ord, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, exact_dtype=False)
res_out = torch.tensor([]).to(device)
torch.linalg.norm(x, ord, keepdim=keepdim, out=res_out)
self.assertEqual(res_out.shape, expected.shape, msg=msg)
self.assertEqual(res_out.cpu(), expected, msg=msg, exact_dtype=False)
# Test that linal.vector_norm gives the same result as numpy when inputs
# contain extreme values (inf, -inf, nan)
def test_vector_norm_extreme_values(self, device):
vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
vectors = []
for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
vectors.append(list(pair))
for vector in vectors:
x = torch.tensor(vector, device=device)
x_n = x.cpu().numpy()
for ord in vector_ords:
msg = f'ord={ord}, vector={vector}'
result = torch.linalg.vector_norm(x, ord=ord)
result_n = np.linalg.norm(x_n, ord=ord)
self.assertEqual(result, result_n, msg=msg)
@skipMeta # https://github.com/pytorch/pytorch/issues/54082
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
@precisionOverride({torch.float32: 2e-5})
def test_matrix_norm(self, device, dtype):
# Test only inputs for which torch.linalg.matrix_norm diverges from torch.linalg.norm
A = make_tensor((2, 2, 2), device, dtype)
with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a matrix.*'):
torch.linalg.matrix_norm(make_tensor((2,), device, dtype))
with self.assertRaisesRegex(RuntimeError, r'linalg.matrix_norm\(\):.*must be a 2-tuple.*'):
torch.linalg.matrix_norm(A, dim=(0,))
with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
torch.linalg.matrix_norm(A, ord=0)
with self.assertRaisesRegex(RuntimeError, r'.*not supported.*'):
torch.linalg.matrix_norm(A, ord=3.0)
# Test dim=None behavior
ref = torch.linalg.norm(A, dim=(-2, -1))
res = torch.linalg.matrix_norm(A)
self.assertEqual(ref, res)
    # Test that linal.norm gives the same result as numpy when inputs
    # contain extreme values (inf, -inf, nan)
    @unittest.skipIf(IS_WINDOWS, "Skipped on Windows!")
    @unittest.skipIf(IS_MACOS, "Skipped on MacOS!")
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_extreme_values(self, device):
        vector_ords = [0, 1, 2, 3, inf, -1, -2, -3, -inf]
        matrix_ords = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf]
        vectors = []
        matrices = []
        # build all length-2 combinations of extreme/ordinary values, both as
        # vectors and as 1x2 / 2x1 matrices
        for pair in itertools.product([inf, -inf, 0.0, nan, 1.0], repeat=2):
            vectors.append(list(pair))
            matrices.append([[pair[0], pair[1]]])
            matrices.append([[pair[0]], [pair[1]]])
        for vector in vectors:
            x = torch.tensor(vector).to(device)
            x_n = x.cpu().numpy()
            for ord in vector_ords:
                msg = f'ord={ord}, vector={vector}'
                result = torch.linalg.norm(x, ord=ord)
                result_n = np.linalg.norm(x_n, ord=ord)
                self.assertEqual(result, result_n, msg=msg)
        # TODO: Remove this function once the broken cases are fixed
        def is_broken_matrix_norm_case(ord, x):
            # Known-broken CUDA svd-backed cases that are skipped rather than
            # compared; see the linked issues.
            if self.device_type == 'cuda':
                if x.size() == torch.Size([1, 2]):
                    if ord in ['nuc', 2, -2] and isnan(x[0][0]) and x[0][1] == 1:
                        # These cases are broken because of an issue with svd
                        # https://github.com/pytorch/pytorch/issues/43567
                        return True
                if ord in ['nuc', 2, -2]:
                    # These cases are broken because of another issue with svd
                    # https://github.com/pytorch/pytorch/issues/52633
                    return True
            return False
        for matrix in matrices:
            x = torch.tensor(matrix).to(device)
            x_n = x.cpu().numpy()
            for ord in matrix_ords:
                msg = f'ord={ord}, matrix={matrix}'
                if is_broken_matrix_norm_case(ord, x):
                    continue
                else:
                    result = torch.linalg.norm(x, ord=ord)
                    result_n = np.linalg.norm(x_n, ord=ord)
                    self.assertEqual(result, result_n, msg=msg)
# Test degenerate shape results match numpy for linalg.norm vector norms
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(TEST_WITH_ASAN, "Skipped on ASAN since it checks for undefined behavior.")
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_norm_vector_degenerate_shapes(self, device, dtype):
def run_test_case(input, ord, dim, keepdim):
msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
should_error = False
if ord is not None and ord < 0:
should_error = True
elif ord == inf:
if dim is None or input.size(dim) == 0:
should_error = True
if should_error:
with self.assertRaises(RuntimeError):
torch.linalg.norm(input, ord, dim, keepdim)
else:
input_numpy = input.cpu().numpy()
result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
result = torch.linalg.norm(input, ord, dim, keepdim)
self.assertEqual(result, result_numpy, msg=msg)
ord_vector = [0, 0.5, 1, 2, 3, inf, -0.5, -1, -2, -3, -inf, None]
S = 10
test_cases = [
# input size, dim
((0, ), None),
((0, S), 0),
((0, S), 1),
((S, 0), 0),
((S, 0), 1),
]
for keepdim in [True, False]:
for input_size, dim in test_cases:
input = torch.randn(*input_size, dtype=dtype, device=device)
for ord in ord_vector:
run_test_case(input, ord, dim, keepdim)
    # Test degenerate shape results match numpy for linalg.norm matrix norms
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_norm_matrix_degenerate_shapes(self, device, dtype):
        def run_test_case(input, ord, dim, keepdim, should_error):
            # When NumPy raises ValueError for the degenerate shape, torch must
            # raise IndexError; otherwise both results must agree.
            msg = f'input.size()={input.size()}, ord={ord}, dim={dim}, keepdim={keepdim}, dtype={dtype}'
            input_numpy = input.cpu().numpy()
            ops = [torch.linalg.norm]
            # matrix_norm requires explicit ord and dim arguments
            if ord is not None and dim is not None:
                ops.append(torch.linalg.matrix_norm)
            if should_error:
                with self.assertRaises(ValueError):
                    np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    with self.assertRaises(IndexError):
                        op(input, ord, dim, keepdim)
            else:
                result_numpy = np.linalg.norm(input_numpy, ord, dim, keepdim)
                for op in ops:
                    result = op(input, ord, dim, keepdim)
                    self.assertEqual(result, result_numpy, msg=msg)
        ord_matrix = ['fro', 'nuc', 1, 2, inf, -1, -2, -inf, None]
        S = 10
        test_cases = [
            # input size, p settings that cause error, dim
            ((0, 0), [1, 2, inf, -1, -2, -inf], None),
            ((0, S), [2, inf, -2, -inf], None),
            ((S, 0), [1, 2, -1, -2], None),
            ((S, S, 0), [], (0, 1)),
            ((1, S, 0), [], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (0, 1)),
            ((0, 0, S), [1, 2, inf, -1, -2, -inf], (1, 0)),
        ]
        for keepdim in [True, False]:
            for input_size, error_ords, dim in test_cases:
                input = torch.randn(*input_size, dtype=dtype, device=device)
                for ord in ord_matrix:
                    run_test_case(input, ord, dim, keepdim, ord in error_ords)
def test_norm_fastpaths(self, device):
x = torch.randn(3, 5, device=device)
# slow path
result = torch.linalg.norm(x, 4.5, 1)
expected = torch.pow(x.abs().pow(4.5).sum(1), 1.0 / 4.5)
self.assertEqual(result, expected)
# fast 0-norm
result = torch.linalg.norm(x, 0, 1)
expected = (x != 0).type_as(x).sum(1)
self.assertEqual(result, expected)
# fast 1-norm
result = torch.linalg.norm(x, 1, 1)
expected = x.abs().sum(1)
self.assertEqual(result, expected)
# fast 2-norm
result = torch.linalg.norm(x, 2, 1)
expected = torch.sqrt(x.pow(2).sum(1))
self.assertEqual(result, expected)
# fast 3-norm
result = torch.linalg.norm(x, 3, 1)
expected = torch.pow(x.pow(3).abs().sum(1), 1.0 / 3.0)
self.assertEqual(result, expected)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_old_eig_basic(self, device, dtype):
        # Fixed 5x5 test matrix (lower-triangular data, transposed).
        a = torch.tensor([[1.96, 0.00, 0.00, 0.00, 0.00],
                          [-6.49, 3.80, 0.00, 0.00, 0.00],
                          [-0.47, -6.39, 4.17, 0.00, 0.00],
                          [-7.20, 1.50, -1.51, 5.70, 0.00],
                          [-0.65, -6.34, 2.67, 1.80, -7.10]],
                         dtype=dtype, device=device).t()
        # eigenvalues-only, eigenvalues+eigenvectors, and the out= variant
        # must all produce the same values
        e = torch.eig(a)[0]
        ee, vv = torch.eig(a, True)
        te = torch.tensor((), dtype=dtype, device=device)
        tv = torch.tensor((), dtype=dtype, device=device)
        eee, vvv = torch.eig(a, True, out=(te, tv))
        self.assertEqual(e, ee, atol=1e-12, rtol=0)
        self.assertEqual(ee, eee, atol=1e-12, rtol=0)
        self.assertEqual(ee, te, atol=1e-12, rtol=0)
        self.assertEqual(vv, vvv, atol=1e-12, rtol=0)
        self.assertEqual(vv, tv, atol=1e-12, rtol=0)
        #
        # compare with numpy
        np_e, np_v = np.linalg.eig(a.cpu().numpy())
        if dtype.is_complex:
            self.assertEqual(ee, np_e)
        else:
            # np_e.shape == (n, 2), where each column contain the real and
            # imaginary parts of the result
            self.assertEqual(ee[:, 0], np_e) # real part
            self.assertEqual(ee[:, 1], torch.zeros(ee.shape[0], dtype=dtype)) # imaginary part
            self.assertEqual(vv, np_v)
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double, torch.float)
    def test_old_eig_reuse(self, device, dtype):
        # Checks that torch.eig's out= tensors can be reused across calls and
        # that the decomposition reconstructs the (symmetric) input:
        # X == V diag(e) V^T.
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)  # make X symmetric positive semi-definite
        e = torch.zeros(4, 2, dtype=dtype, device=device)
        v = torch.zeros(4, 4, dtype=dtype, device=device)
        torch.eig(X, True, out=(e, v))
        # e.select(1, 0) is the real part of the eigenvalues
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.select(1, 0)).cpu()), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
        # second call reusing the same out tensors must behave identically
        torch.eig(X, True, out=(e, v))
        Xhat = np.matmul(v.cpu(), np.matmul(e.select(1, 0).diag().cpu(), v.t().cpu()))
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
        self.assertTrue(v.is_contiguous(), 'V is not contiguous')
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    @dtypes(torch.double, torch.float)
    def test_old_eig_non_contiguous(self, device, dtype):
        # Checks torch.eig with non-contiguous out= tensors: e and v are
        # slices of larger buffers, so they are valid but not contiguous.
        X = torch.randn(4, 4, dtype=dtype, device=device)
        X = torch.mm(X.t(), X)  # make X symmetric positive semi-definite
        e = torch.zeros(4, 2, 2, dtype=dtype, device=device)[:, 1]
        v = torch.zeros(4, 2, 4, dtype=dtype, device=device)[:, 1]
        self.assertFalse(v.is_contiguous(), 'V is contiguous')
        self.assertFalse(e.is_contiguous(), 'E is contiguous')
        torch.eig(X, True, out=(e, v))
        # reconstruction X == V diag(e_real) V^T must still hold
        Xhat = np.matmul(np.matmul(v.cpu(), torch.diag(e.cpu().select(1, 0))), v.t().cpu())
        if dtype is torch.float:
            atol = 1e-7
            rtol = 1e-5
        else:
            atol = 1e-8
            rtol = 0
        self.assertEqual(X, Xhat, atol=atol, rtol=rtol, msg='VeV\' wrong')
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double, torch.float)
def test_old_eig_invalid_input(self, device, dtype):
# test invalid input
self.assertRaisesRegex(
RuntimeError,
'input should be 2 dimensional',
lambda: torch.eig(torch.ones((2))))
self.assertRaisesRegex(
RuntimeError,
'input should be square',
lambda: torch.eig(torch.ones((2, 3))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.inf * torch.ones((2, 2))))
self.assertRaisesRegex(
RuntimeError,
'input should not contain infs or NaNs',
lambda: torch.eig(np.nan * torch.ones((2, 2))))
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.double, torch.float)
    def test_old_eig_out(self, device, dtype):
        # the out version of torch.eig needs to be tested manually: we can't
        # use the "test_out=True" parameter to tensor_op_tests because the
        # signature is irregular (since we have *two* output vectors)
        t = torch.randn(10, 10, dtype=dtype, device=device)
        evals, evecs = torch.eig(t, eigenvectors=True)
        #
        # check that the out= version computes the same values as the normal one
        out_evals = torch.empty_like(evals)
        out_evecs = torch.empty_like(evecs)
        evals2, evecs2 = torch.eig(t, eigenvectors=True, out=(out_evals, out_evecs))
        # check that the out tensors were used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evecs2.data_ptr(), out_evecs.data_ptr())
        # check that the result is the same as the non-out version
        self.assertEqual(evals, out_evals)
        self.assertEqual(evecs, out_evecs)
        #
        # check what happens in the eigenvectors=False case
        out_evals = torch.empty_like(evals)
        # sentinel contents let us verify the tensor was left untouched below
        out_evecs = torch.tensor([1, 2, 3], dtype=dtype, device=device)
        evals2, evecs2 = torch.eig(t, eigenvectors=False, out=(out_evals, out_evecs))
        # check that the out_evals was used in-place
        self.assertEqual(evals2.data_ptr(), out_evals.data_ptr())
        self.assertEqual(evals, out_evals)
        # check that out_evecs was NOT touched at all
        assert out_evecs.tolist() == [1, 2, 3]
        #
        # check that we complain if we pass an out vector of the wrong dtype
        wrong_out = torch.empty((0, 0), dtype=int)
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(wrong_out, out_evecs))
        with self.assertRaisesRegex(RuntimeError, r"Expected .* but got .*"):
            torch.eig(t, eigenvectors=True, out=(out_evals, wrong_out))
    @skipCPUIfNoLapack
    @skipCUDAIfNoMagma
    # NumPy computes only in float64 and complex128 precisions
    # for float32 or complex64 results might be very different from float64 or complex128
    @dtypes(torch.float64, torch.complex128)
    def test_eig_numpy(self, device, dtype):
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                # unlike NumPy the result is not cast to float32 or float64 dtype in this case
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eig(a)
            # compare with NumPy
            # the eigenvalues are not necessarily ordered
            # so order of NumPy and PyTorch can be different
            expected = np.linalg.eig(a.cpu().numpy())
            # sort NumPy output
            ind = np.argsort(expected[0], axis=-1)[::-1]
            expected = (np.take_along_axis(expected[0], ind, axis=-1), np.take_along_axis(expected[1], ind[:, None], axis=-1))
            # sort PyTorch output
            # torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
            # RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
            # RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
            ind = np.argsort(actual[0].cpu().numpy(), axis=-1)[::-1]
            actual_np = [x.cpu().numpy() for x in actual]
            sorted_actual = (
                np.take_along_axis(actual_np[0], ind, axis=-1),
                np.take_along_axis(actual_np[1], ind[:, None], axis=-1))
            self.assertEqual(expected[0], sorted_actual[0], exact_dtype=False)
            # eigenvectors are compared by magnitude only (abs of both sides)
            self.assertEqual(abs(expected[1]), abs(sorted_actual[1]), exact_dtype=False)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_compare_backends(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eig(a)
complementary_device = 'cpu'
# compare with CPU
expected = torch.linalg.eig(a.to(complementary_device))
self.assertEqual(expected[0], actual[0])
self.assertEqual(expected[1], actual[1])
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
@slowTest
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(torch.float32)
def test_eig_check_magma(self, device, dtype):
# For CUDA inputs only matrices of size larger than 2048x2048 actually call MAGMA library
shape = (2049, 2049)
a = make_tensor(shape, dtype=dtype, device=device)
w, v = torch.linalg.eig(a)
# check correctness using eigendecomposition identity
self.assertEqual(a.to(v.dtype) @ v, w * v, atol=1e-3, rtol=1e-3)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eig_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.eig input checks: dimensionality/squareness
        errors, out= dtype-castability errors, resize warnings for wrongly
        shaped out tensors, and cross-device out= rejection."""
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eig(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eig(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            # so this real matrix has complex eigenvalues/eigenvectors
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out0 = torch.empty(0, device=device, dtype=dtype)
            out1 = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
            # out0 is complex now, so only out1 (the eigenvectors) is rejected
            out0 = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvectors to be safely castable"):
                torch.linalg.eig(a, out=(out0, out1))
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(0, dtype=torch.int, device=device)
        out1 = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        out0 = torch.empty(0, dtype=torch.complex128, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
            torch.linalg.eig(a, out=(out0, out1))
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out0 = torch.empty(1, device=device, dtype=torch.complex128)
        out1 = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eig(a, out=(out0, out1))
            # Check warning occurs
            # one resize warning per out tensor (eigenvalues and eigenvectors)
            self.assertEqual(len(w), 2)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            out_v = torch.empty(0, device=device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
            out_w = torch.empty(0, device=device, dtype=torch.complex128)
            out_v = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eig(a, out=(out_w, out_v))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
def test_eig_with_nan(self, device, dtype):
for val in [np.inf, np.nan]:
for batch_dim in [(), (10,)]:
a = make_tensor((*batch_dim, 5, 5), device=device, dtype=dtype)
a[..., -1, -1] = val
with self.assertRaisesRegex(RuntimeError, "torch.linalg.eig: input tensor should not"):
torch.linalg.eig(a)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
# NumPy computes only in float64 and complex128 precisions
# for float32 or complex64 results might be very different from float64 or complex128
@dtypes(torch.float64, torch.complex128)
def test_eigvals_numpy(self, device, dtype):
def run_test(shape, *, symmetric=False):
from torch.testing._internal.common_utils import random_symmetric_matrix
if not dtype.is_complex and symmetric:
# for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
# unlike NumPy the result is not cast to float32 or float64 dtype in this case
a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
else:
a = make_tensor(shape, dtype=dtype, device=device)
actual = torch.linalg.eigvals(a)
# compare with NumPy
# the eigenvalues are not necessarily ordered
# so order of NumPy and PyTorch can be different
expected = np.linalg.eigvals(a.cpu().numpy())
# sort NumPy output
ind = np.argsort(expected, axis=-1)[::-1]
expected = np.take_along_axis(expected, ind, axis=-1)
# sort PyTorch output
# torch.argsort doesn't work with complex inputs, NumPy sorting on CPU is used instead
# RuntimeError: _th_sort not supported on CUDAType for ComplexDouble
# RuntimeError: "sorting_kernel_method_name" not implemented for 'ComplexDouble'
ind = np.argsort(actual.cpu().numpy(), axis=-1)[::-1]
actual_np = actual.cpu().numpy()
sorted_actual = np.take_along_axis(actual_np, ind, axis=-1)
self.assertEqual(expected, sorted_actual, exact_dtype=False)
shapes = [(0, 0), # Empty matrix
(5, 5), # Single matrix
(0, 0, 0), (0, 5, 5), # Zero batch dimension tensors
(2, 5, 5), # 3-dim tensors
(2, 1, 5, 5)] # 4-dim tensors
for shape in shapes:
run_test(shape)
run_test(shape, symmetric=True)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @dtypes(*floating_and_complex_types())
    def test_eigvals_compare_backends(self, device, dtype):
        """torch.linalg.eigvals on this device must agree with the CPU
        backend; also exercises the out= variant, including complex
        promotion for real inputs and a non-contiguous out tensor."""
        def run_test(shape, *, symmetric=False):
            from torch.testing._internal.common_utils import random_symmetric_matrix
            if not dtype.is_complex and symmetric:
                # for symmetric real-valued inputs eigenvalues and eigenvectors have imaginary part equal to zero
                a = random_symmetric_matrix(shape[-1], *shape[:-2], dtype=dtype, device=device)
            else:
                a = make_tensor(shape, dtype=dtype, device=device)
            actual = torch.linalg.eigvals(a)
            complementary_device = 'cpu'
            # compare with CPU
            expected = torch.linalg.eigvals(a.to(complementary_device))
            self.assertEqual(expected, actual)
            # check out= variant
            # eigenvalues of a real matrix may be complex, so out must be complex
            complex_dtype = dtype
            if not dtype.is_complex:
                complex_dtype = torch.complex128 if dtype == torch.float64 else torch.complex64
            out = torch.empty(0, dtype=complex_dtype, device=device)
            ans = torch.linalg.eigvals(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(expected.to(complex_dtype), out)
            # check non-contiguous out
            if a.numel() > 0:
                # double the leading dim and stride by 2 => correct result
                # shape but non-contiguous storage
                out = torch.empty(2 * shape[0], *shape[1:-1], dtype=complex_dtype, device=device)[::2]
                self.assertFalse(out.is_contiguous())
                ans = torch.linalg.eigvals(a, out=out)
                self.assertEqual(ans, out)
                self.assertEqual(expected.to(complex_dtype), out)
        shapes = [(0, 0),  # Empty matrix
                  (5, 5),  # Single matrix
                  (0, 0, 0), (0, 5, 5),  # Zero batch dimension tensors
                  (2, 5, 5),  # 3-dim tensors
                  (2, 1, 5, 5)]  # 4-dim tensors
        for shape in shapes:
            run_test(shape)
            run_test(shape, symmetric=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_eigvals_errors_and_warnings(self, device, dtype):
        """Validate torch.linalg.eigvals input checks: dimensionality and
        squareness errors, out= dtype-castability errors, the resize warning
        for a wrongly shaped out tensor, and cross-device out= rejection."""
        # eig requires the input to be at least 2 dimensional tensor
        a = make_tensor(2, dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
            torch.linalg.eigvals(a)
        # eig requires a square matrix
        a = make_tensor((2, 3), dtype=dtype, device=device)
        with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
            torch.linalg.eigvals(a)
        # if out tensor with floating dtype is passed for complex output an error is thrown
        if not dtype.is_complex:
            # The characteristic equation is p(λ) = λ^2 − 2λ + 5 = 0, with roots λ = 1±2i
            # so this real matrix has complex eigenvalues
            a = torch.tensor([[3., -2.], [4., -1.]], dtype=dtype, device=device)
            out = torch.empty(0, device=device, dtype=dtype)
            with self.assertRaisesRegex(RuntimeError, "Expected eigenvalues to be safely castable"):
                torch.linalg.eigvals(a, out=out)
        # dtypes should be safely castable
        a = make_tensor((3, 3), dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.int, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
            torch.linalg.eigvals(a, out=out)
        # if non-empty out tensor with wrong shape is passed a warning is given
        out = torch.empty(1, device=device, dtype=torch.complex128)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.eigvals(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out_w = torch.empty(0, device=wrong_device, dtype=torch.complex128)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.eigvals(a, out=out_w)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_norm_old(self, device):
        """Compare the legacy torch.norm against numpy.linalg.norm for full,
        one-dimensional and matrix reductions, 0-dim inputs, and all 2-dim
        reduction pairs on a 4-D tensor, with and without keepdim."""
        def gen_error_message(input_size, p, keepdim, dim=None):
            # diagnostic identifying the failing (size, p, keepdim, dim) combo
            return "norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
                input_size, p, keepdim, dim)
        for keepdim in [False, True]:
            # full reduction
            x = torch.randn(25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3, 1.5]:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                self.assertEqual(res, expected, atol=1e-5, rtol=0, msg=gen_error_message(x.size(), p, keepdim))
            # one dimension
            x = torch.randn(25, 25, device=device)
            xn = x.cpu().numpy()
            for p in [0, 1, 2, 3, 4, inf, -inf, -1, -2, -3]:
                dim = 1
                res = x.norm(p, dim, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, dim, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim, dim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # matrix norm
            for p in ['fro', 'nuc']:
                res = x.norm(p, keepdim=keepdim).cpu()
                expected = np.linalg.norm(xn, p, keepdims=keepdim)
                msg = gen_error_message(x.size(), p, keepdim)
                self.assertEqual(res.shape, expected.shape, msg=msg)
                self.assertEqual(res, expected, msg=msg)
            # zero dimensions
            x = torch.randn((), device=device)
            xn = x.cpu().numpy()
            res = x.norm(keepdim=keepdim).cpu()
            expected = np.linalg.norm(xn, keepdims=keepdim)
            msg = gen_error_message(x.size(), None, keepdim)
            self.assertEqual(res.shape, expected.shape, msg=msg)
            self.assertEqual(res, expected, msg=msg)
            # larger tensor sanity check
            # 2-norm of all-ones grows like sqrt(numel): ||ones(4n)|| == 2*||ones(n)||
            self.assertEqual(
                2 * torch.norm(torch.ones(10000), keepdim=keepdim),
                torch.norm(torch.ones(40000), keepdim=keepdim))
            # matrix norm with non-square >2-D tensors, all combinations of reduction dims
            x = torch.randn(5, 6, 7, 8, device=device)
            xn = x.cpu().numpy()
            for p in ['fro', 'nuc']:
                for dim in itertools.product(*[list(range(4))] * 2):
                    if dim[0] == dim[1]:
                        continue
                    res = x.norm(p=p, dim=dim, keepdim=keepdim).cpu()
                    expected = np.linalg.norm(xn, ord=p, axis=dim, keepdims=keepdim)
                    msg = gen_error_message(x.size(), p, keepdim, dim)
                    self.assertEqual(res.shape, expected.shape, msg=msg)
                    self.assertEqual(res, expected, msg=msg)
# Test that torch.norm with p=+/-inf propagates NaN
def test_norm_old_nan_propagation(self, device):
ords = [inf, -inf]
for pair in itertools.product([0.0, nan, 1.0], repeat=2):
x = torch.tensor(list(pair), device=device)
for ord in ords:
result = torch.norm(x, p=ord)
result_check = torch.linalg.norm(x, ord=ord)
self.assertEqual(result, result_check)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_norm_complex_old(self, device):
def gen_error_message(input_size, p, keepdim, dim=None):
return "complex norm failed for input size %s, p=%s, keepdim=%s, dim=%s" % (
input_size, p, keepdim, dim)
for keepdim in [False, True]:
# vector norm
x = torch.randn(25, device=device) + 1j * torch.randn(25, device=device)
xn = x.cpu().numpy()
for p in [0, 1, 2, 3, inf, -1, -2, -3, -inf]:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg)
# matrix norm
x = torch.randn(25, 25, device=device) + 1j * torch.randn(25, 25, device=device)
xn = x.cpu().numpy()
for p in ['nuc', 'fro']:
res = x.norm(p, keepdim=keepdim).cpu()
expected = np.linalg.norm(xn, p, keepdims=keepdim)
msg = gen_error_message(x.size(), p, keepdim)
self.assertEqual(res.shape, expected.shape, msg=msg)
self.assertEqual(res, expected, msg=msg, rtol=1.3e-6, atol=3e-4)
# Ensure torch.norm with p='fro' and p=2 give the same results for mutually supported input combinations
@dtypes(torch.float)
def test_norm_fro_2_equivalence_old(self, device, dtype):
input_sizes = [
(0,),
(10,),
(0, 0),
(4, 30),
(0, 45),
(100, 0),
(45, 10, 23),
(0, 23, 59),
(23, 0, 37),
(34, 58, 0),
(0, 0, 348),
(0, 3434, 0),
(0, 0, 0),
(5, 3, 8, 1, 3, 5)]
for input_size in input_sizes:
a = make_tensor(input_size, device, dtype, low=-9, high=9)
# Try full reduction
dim_settings = [None]
# Try all possible 1-D reductions
dim_settings += list(range(-a.dim(), a.dim()))
def wrap_dim(dim, ndims):
assert (dim < ndims) and (dim >= -ndims)
if dim >= 0:
return dim
else:
return dim + ndims
# Try all possible 2-D reductions
dim_settings += [
(d0, d1) for d0, d1 in itertools.combinations(range(-a.dim(), a.dim()), 2)
if wrap_dim(d0, a.dim()) != wrap_dim(d1, a.dim())]
for dim in dim_settings:
for keepdim in [True, False]:
a_norm_2 = torch.norm(a, p=2, dim=dim, keepdim=keepdim)
a_norm_fro = torch.norm(a, p='fro', dim=dim, keepdim=keepdim)
self.assertEqual(a_norm_fro, a_norm_2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_nuclear_norm_axes_small_brute_force_old(self, device):
def check_single_nuclear_norm(x, axes):
if self.device_type != 'cpu' and randrange(100) < 95:
return # too many cpu <==> device copies
a = np.array(x.cpu(), copy=False)
expected = np.linalg.norm(a, "nuc", axis=axes)
ans = torch.norm(x, "nuc", dim=axes)
self.assertTrue(ans.is_contiguous())
self.assertEqual(ans.shape, expected.shape)
self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
out = torch.zeros(expected.shape, dtype=x.dtype, device=x.device)
ans = torch.norm(x, "nuc", dim=axes, out=out)
self.assertIs(ans, out)
self.assertTrue(ans.is_contiguous())
self.assertEqual(ans.shape, expected.shape)
self.assertEqual(ans.cpu(), expected, rtol=1e-02, atol=1e-03, equal_nan=True)
for n in range(1, 3):
for m in range(1, 3):
for axes in itertools.permutations([0, 1], 2):
# 2d, inner dimensions C
x = torch.randn(n, m, device=device)
check_single_nuclear_norm(x, axes)
# 2d, inner dimensions Fortran
x = torch.randn(m, n, device=device).mT
check_single_nuclear_norm(x, axes)
# 2d, inner dimensions non-contiguous
x = torch.randn(n, 2 * m, device=device)[:, ::2]
check_single_nuclear_norm(x, axes)
# 2d, all dimensions non-contiguous
x = torch.randn(7 * n, 2 * m, device=device)[::7, ::2]
check_single_nuclear_norm(x, axes)
for o in range(1, 3):
for axes in itertools.permutations([0, 1, 2], 2):
# 3d, inner dimensions C
x = torch.randn(o, n, m, device=device)
check_single_nuclear_norm(x, axes)
# 3d, inner dimensions Fortran
x = torch.randn(o, m, n, device=device).mT
check_single_nuclear_norm(x, axes)
# 3d, inner dimensions non-contiguous
x = torch.randn(o, n, 2 * m, device=device)[:, :, ::2]
check_single_nuclear_norm(x, axes)
# 3d, all dimensions non-contiguous
x = torch.randn(7 * o, 5 * n, 2 * m, device=device)[::7, ::5, ::2]
check_single_nuclear_norm(x, axes)
for r in range(1, 3):
for axes in itertools.permutations([0, 1, 2, 3], 2):
# 4d, inner dimensions C
x = torch.randn(r, o, n, m, device=device)
check_single_nuclear_norm(x, axes)
# 4d, inner dimensions Fortran
x = torch.randn(r, o, n, m, device=device).mT
check_single_nuclear_norm(x, axes)
# 4d, inner dimensions non-contiguous
x = torch.randn(r, o, n, 2 * m, device=device)[:, :, :, ::2]
check_single_nuclear_norm(x, axes)
# 4d, all dimensions non-contiguous
x = torch.randn(7 * r, 5 * o, 11 * n, 2 * m, device=device)[::7, ::5, ::11, ::2]
check_single_nuclear_norm(x, axes)
@skipCUDAIfNoMagma
def test_nuclear_norm_exceptions_old(self, device):
for lst in [], [1], [1, 2]:
x = torch.tensor(lst, dtype=torch.double, device=device)
for axes in (), (0,):
self.assertRaises(RuntimeError, torch.norm, x, "nuc", axes)
self.assertRaises(IndexError, torch.norm, x, "nuc", (0, 1))
x = torch.tensor([[0, 1, 2], [3, 4, 5]], dtype=torch.double, device=device)
self.assertRaisesRegex(RuntimeError, "duplicate or invalid", torch.norm, x, "nuc", (0, 0))
self.assertRaisesRegex(IndexError, "Dimension out of range", torch.norm, x, "nuc", (0, 2))
    # ~~~ tests for torch.svd ~~~
    @skipCUDAIfNoMagmaAndNoCusolver
    @skipCPUIfNoLapack
    @dtypes(torch.double)
    def test_svd(self, device, dtype):
        """Check torch.svd over empty/square/fat/thin/batched shapes for all
        (some, compute_uv) combinations: reconstruction from U/S/V, agreement
        between the functional and out= forms, and non-contiguous inputs."""
        def run_test(dims, some, compute_uv):
            x = torch.randn(*dims, dtype=dtype, device=device)
            outu = torch.empty(0, dtype=dtype, device=device)
            outs = torch.empty(0, dtype=dtype, device=device)
            outv = torch.empty(0, dtype=dtype, device=device)
            torch.svd(x, some=some, compute_uv=compute_uv, out=(outu, outs, outv))
            if compute_uv:
                if some:
                    x_recon = torch.matmul(outu, torch.matmul(outs.diag_embed(), outv.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    # some=False returns full U/V; only the first min(m, n)
                    # columns take part in the reconstruction
                    narrow_u = outu[..., :min(*dims[-2:])]
                    narrow_v = outv[..., :min(*dims[-2:])]
                    x_recon = torch.matmul(narrow_u, torch.matmul(outs.diag_embed(), narrow_v.mT))
                    self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
            else:
                # compute_uv=False: singular values must still match, and the
                # U/V out tensors are filled with zeros
                _, singvals, _ = torch.svd(x, compute_uv=True)
                self.assertEqual(singvals, outs, msg='Singular values mismatch')
                self.assertEqual(outu, torch.zeros_like(outu), msg='U not zero')
                self.assertEqual(outv, torch.zeros_like(outv), msg='V not zero')
            resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
            self.assertEqual(resu, outu, msg='outputs of svd and svd with out differ')
            self.assertEqual(ress, outs, msg='outputs of svd and svd with out differ')
            self.assertEqual(resv, outv, msg='outputs of svd and svd with out differ')
            # test non-contiguous
            x = torch.randn(*dims, dtype=dtype, device=device)
            if x.numel() > 0:
                n_dim = len(dims)
                # Reverse the batch dimensions and the matrix dimensions and then concat them
                x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
                assert not x.is_contiguous(), "x is intentionally non-contiguous"
                resu, ress, resv = torch.svd(x, some=some, compute_uv=compute_uv)
                if compute_uv:
                    if some:
                        x_recon = torch.matmul(resu, torch.matmul(ress.diag_embed(), resv.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                    else:
                        narrow_u = resu[..., :min(*dims[-2:])]
                        narrow_v = resv[..., :min(*dims[-2:])]
                        x_recon = torch.matmul(narrow_u, torch.matmul(ress.diag_embed(), narrow_v.mT))
                        self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using U @ diag(S) @ V.T')
                else:
                    _, singvals, _ = torch.svd(x, compute_uv=True)
                    self.assertEqual(singvals, ress, msg='Singular values mismatch')
                    self.assertEqual(resu, torch.zeros_like(resu), msg='U not zero')
                    self.assertEqual(resv, torch.zeros_like(resv), msg='V not zero')
        shapes = [(0, 0), (5, 0), (0, 5),  # empty matrices
                  (0, 0, 0), (0, 5, 5), (0, 5, 3),  # zero batch dimension
                  (3, 3), (5, 3, 3), (7, 5, 3, 3),  # square matrices
                  (7, 3), (5, 7, 3), (7, 5, 7, 3),  # fat matrices
                  (3, 7), (5, 3, 7), (7, 5, 3, 7)]  # thin matrices
        for dims, some, compute_uv in product(shapes, [True, False], [True, False]):
            run_test(dims, some, compute_uv)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_svd_no_singularvectors(self, device, dtype):
for size in [(5, 5), (5, 20), (20, 5)]:
a = torch.randn(*size, device=device, dtype=dtype)
u, s_expect, v = torch.svd(a)
u, s_actual, v = torch.svd(a, compute_uv=False)
self.assertEqual(s_expect, s_actual, msg="Singular values don't match")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_svd_lowrank(self, device, dtype):
from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **options):
density = options.pop('density', 1)
if isinstance(matrix_size, int):
rows = columns = matrix_size
else:
rows, columns = matrix_size
if density == 1:
a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
a = a_input
else:
assert batches == ()
a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
a = a_input.to_dense()
q = min(*size)
u, s, v = svd_lowrank(a_input, q=q, **options)
# check if u, s, v is a SVD
u, s, v = u[..., :q], s[..., :q], v[..., :q]
A = u.matmul(s.diag_embed()).matmul(v.mT)
self.assertEqual(A, a, rtol=1e-7, atol=2e-7)
# check if svd_lowrank produces same singular values as torch.svd
U, S, V = torch.svd(a)
self.assertEqual(s.shape, S.shape)
self.assertEqual(u.shape, U.shape)
self.assertEqual(v.shape, V.shape)
self.assertEqual(s, S)
if density == 1:
# actual_rank is known only for dense inputs
#
# check if pairs (u, U) and (v, V) span the same
# subspaces, respectively
u, s, v = u[..., :actual_rank], s[..., :actual_rank], v[..., :actual_rank]
U, S, V = U[..., :actual_rank], S[..., :actual_rank], V[..., :actual_rank]
self.assertEqual(u.mT.matmul(U).det().abs(), torch.ones(batches, device=device, dtype=dtype))
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))
all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
(10, (100, 40), all_batches),
(7, (1000, 1000), [()]),
]:
# dense input
for batches in all_batches:
run_subtest(actual_rank, size, batches, device, torch.svd_lowrank)
if size != size[::-1]:
run_subtest(actual_rank, size[::-1], batches, device, torch.svd_lowrank)
# sparse input
for size in [(17, 4), (4, 17), (17, 17), (100, 40), (40, 100), (1000, 1000)]:
for density in [0.005, 0.1]:
run_subtest(None, size, (), device, torch.svd_lowrank, density=density)
# jitting support
jitted = torch.jit.script(torch.svd_lowrank)
actual_rank, size, batches = 2, (17, 4), ()
run_subtest(actual_rank, size, batches, device, jitted)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.cfloat)
def test_svd_complex(self, device, dtype):
# this test verifies that torch.svd really returns V and not V.conj()
# see: https://github.com/pytorch/pytorch/issues/45821
t = torch.randn((10, 10), dtype=dtype, device=device)
U, S, V = torch.svd(t, some=False)
# verify that t ≈ t2
# t2 = U @ diag(S) @ Vᴴ
# Vᴴ is the conjugate transpose of V
t2 = U @ torch.diag(S).type(dtype) @ V.conj().T
self.assertEqual(t, t2)
def _test_svd_helper(self, shape, some, col_maj, device, dtype):
# test implementation below uses cpu unconditionally
if not torch._C.has_lapack:
reason = "PyTorch compiled without Lapack"
raise unittest.SkipTest(reason)
# To have accurate tests and less false positives on different CPUs and GPUs,
# we use double or complex double accuracy for CPU reference.
cpu_dtype = torch.complex128 if dtype.is_complex else torch.float64
cpu_tensor = torch.randn(shape, device='cpu', dtype=cpu_dtype)
device_tensor = cpu_tensor.to(device=device, dtype=dtype)
if col_maj:
cpu_tensor = cpu_tensor.t()
device_tensor = device_tensor.t()
cpu_result = torch.svd(cpu_tensor, some=some)
device_result = torch.svd(device_tensor, some=some)
m = min(cpu_tensor.shape[-2:])
# torch.svd returns torch.return_types.svd which is a tuple of (U, V, S).
# - When some==False, U[..., m:] can be arbitrary.
# - When some==True, U shape: [..., m], V shape: [m, m]
# - Signs are not deterministic. If the sign of a column of U is changed
# then the corresponding column of the V has to be changed.
# Thus here we only compare result[..., :m].abs() from CPU and device.
for x, y in zip(cpu_result, device_result):
self.assertEqual(x[..., :m].abs(), y[..., :m].abs(), exact_dtype=False)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_svd_errors_and_warnings(self, device, dtype):
        """For both torch.svd and torch.linalg.svd: resize warnings for wrong
        out= shapes, dtype-castability errors, cross-device out= rejection,
        and LinAlgError for NaN-containing inputs."""
        for svd in [torch.svd, torch.linalg.svd]:
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(3, 3, dtype=dtype, device=device)
            real_dtype = a.real.dtype if dtype.is_complex else dtype
            out_u = torch.empty(2, 2, dtype=dtype, device=device)
            out_s = torch.empty(4, 4, dtype=real_dtype, device=device)
            out_v = torch.empty(6, 6, dtype=dtype, device=device)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                svd(a, out=(out_u, out_s, out_v))
                # Check warning occurs
                # one resize warning per out tensor (U, S and V)
                self.assertEqual(len(w), 3)
                self.assertTrue("An output with one or more elements was resized" in str(w[-3].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out_u = torch.empty(0, dtype=torch.int, device=device)
            out_s = torch.empty(0, dtype=torch.int, device=device)
            out_v = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got U with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))
            out_u = torch.empty(0, dtype=dtype, device=device)
            # torch.linalg.svd names its third output Vh, torch.svd names it V
            if svd == torch.linalg.svd:
                msg = "but got Vh with dtype Int"
            else:
                msg = "but got V with dtype Int"
            with self.assertRaisesRegex(RuntimeError, msg):
                svd(a, out=(out_u, out_s, out_v))
            out_v = torch.empty(0, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got S with dtype Int"):
                svd(a, out=(out_u, out_s, out_v))
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out_u = torch.empty(0, device=wrong_device, dtype=dtype)
                out_s = torch.empty(0, device=wrong_device, dtype=real_dtype)
                out_v = torch.empty(0, device=wrong_device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_u
                    svd(a, out=(out_u, out_s, out_v))
                out_u = torch.empty(0, device=device, dtype=dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_s
                    svd(a, out=(out_u, out_s, out_v))
                out_s = torch.empty(0, device=device, dtype=real_dtype)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    # error from out_v
                    svd(a, out=(out_u, out_s, out_v))
            # if input contains NaN then an error is triggered for svd
            # When cuda < 11.5, cusolver raises CUSOLVER_STATUS_EXECUTION_FAILED when input contains nan.
            # When cuda >= 11.5, cusolver normally finishes execution and sets info array indicating convergence issue.
            error_msg = r'(CUSOLVER_STATUS_EXECUTION_FAILED|The algorithm failed to converge)'
            a = torch.full((3, 3), float('nan'), dtype=dtype, device=device)
            a[0] = float('nan')
            with self.assertRaisesRegex(torch.linalg.LinAlgError, error_msg):
                svd(a)
            # batched input: the error message names the failing batch element
            error_msg = r'(CUSOLVER_STATUS_EXECUTION_FAILED|\(Batch element 1\): The algorithm failed to converge)'
            a = torch.randn(3, 33, 33, dtype=dtype, device=device)
            a[1, 0, 0] = float('nan')
            with self.assertRaisesRegex(torch.linalg.LinAlgError, error_msg):
                svd(a)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_square(self, device, dtype):
self._test_svd_helper((10, 10), True, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_square_col_maj(self, device, dtype):
self._test_svd_helper((10, 10), True, True, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_some(self, device, dtype):
self._test_svd_helper((20, 5), True, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_all(self, device, dtype):
self._test_svd_helper((20, 5), False, False, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_some_col_maj(self, device, dtype):
self._test_svd_helper((5, 20), True, True, device, dtype)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_types())
def test_svd_tall_all_col_maj(self, device, dtype):
self._test_svd_helper((5, 20), False, True, device, dtype)
# ~~~ tests for torch.linalg.svd ~~~
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_linalg_svd_compute_uv(self, device, dtype):
"""
Test the default case. Here we have the very same behavior as
NumPy with compute_uv=True.
"""
t = torch.randn((10, 11), device=device, dtype=dtype)
np_t = t.cpu().numpy()
for full_matrices in (True, False):
# check linalg.svd vs numpy
expected = np.linalg.svd(np_t, full_matrices, compute_uv=True)
actual = torch.linalg.svd(t, full_matrices)
# sign/phase of the singular vectors is not unique and therefore absolute values are compared
self.assertEqual(abs(actual[0]), abs(expected[0]))
self.assertEqual(actual[1], expected[1])
self.assertEqual(abs(actual[2]), abs(expected[2]))
# check linalg.svd vs linalg.svd(out=...)
out = (torch.empty_like(actual[0]),
torch.empty_like(actual[1]),
torch.empty_like(actual[2]))
out2 = torch.linalg.svd(t, full_matrices, out=out)
self.assertEqual(actual, out)
self.assertEqual(actual, out2)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svdvals(self, device, dtype):
def run_test(shape):
# NumPy doesn't have separate svdvals function, it is included in
# svd with compute_uv=False
# so we test our implementation against numpy.linalg.svd(*, compute_uv=False)
A = make_tensor(shape, dtype=dtype, device=device)
expected = np.linalg.svd(A.cpu(), compute_uv=False)
actual = torch.linalg.svdvals(A)
self.assertEqual(actual, expected)
batches = [(), (0, ), (2, ), (2, 1)]
ns = [5, 2, 0]
for batch, (m, n) in itertools.product(batches, product(ns, ns)):
run_test((*batch, m, n))
@skipCUDAIfNoCusolver # MAGMA backend doesn't work in this case
@skipCUDAIfRocm
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_svd_memory_allocation(self, device, dtype):
# test for https://github.com/pytorch/pytorch/issues/61949
# the problem was that tensors of incorrect size were allocated and then narrowed
m = 3
n = 2**20
a = make_tensor((m, n), dtype=dtype, device=device)
# the following should run without errors
result = torch.linalg.svdvals(a)
result = torch.linalg.svd(a, full_matrices=False)
out0 = torch.empty_like(result[0])
out1 = torch.empty_like(result[1])
out2 = torch.empty_like(result[2])
torch.linalg.svdvals(a, out=out0)
torch.linalg.svd(a, full_matrices=False, out=(out0, out1, out2))
def cholesky_solve_test_helper(self, A_dims, b_dims, upper, device, dtype):
from torch.testing._internal.common_utils import random_hermitian_pd_matrix
b = torch.randn(*b_dims, dtype=dtype, device=device)
A = random_hermitian_pd_matrix(*A_dims, dtype=dtype, device=device)
L = torch.cholesky(A, upper=upper)
return b, A, L
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve(self, device, dtype):
for (k, n), upper in itertools.product(zip([2, 3, 5], [3, 5, 7]), [True, False]):
b, A, L = self.cholesky_solve_test_helper((n,), (n, k), upper, device, dtype)
x = torch.cholesky_solve(b, L, upper=upper)
self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched(self, device, dtype):
def cholesky_solve_batch_helper(A_dims, b_dims, upper):
b, A, L = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
x_exp_list = []
for i in range(b_dims[0]):
x_exp_list.append(torch.cholesky_solve(b[i], L[i], upper=upper))
x_exp = torch.stack(x_exp_list) # Stacked output
x_act = torch.cholesky_solve(b, L, upper=upper) # Actual output
self.assertEqual(x_act, x_exp) # Equality check
Ax = np.matmul(A.cpu(), x_act.cpu())
self.assertEqual(b, Ax) # Correctness check
for upper, batchsize in itertools.product([True, False], [1, 3, 4]):
cholesky_solve_batch_helper((5, batchsize), (batchsize, 5, 10), upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_solve_batched_non_contiguous(self, device, dtype):
    """cholesky_solve accepts permuted (non-contiguous) A and b."""
    from numpy.linalg import solve
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    for upper in (True, False):
        mat = random_hermitian_pd_matrix(2, 2, dtype=dtype, device='cpu')
        rhs = torch.randn(2, 2, 2, dtype=dtype, device='cpu')
        # NumPy reference computed on the same permuted operands.
        reference = solve(mat.permute(0, 2, 1).numpy(), rhs.permute(2, 1, 0).numpy())
        mat = mat.to(device).permute(0, 2, 1)
        rhs = rhs.to(device).permute(2, 1, 0)
        assert not mat.is_contiguous() and not rhs.is_contiguous(), "contiguous inputs"
        chol = torch.cholesky(mat, upper)
        result = torch.cholesky_solve(rhs, chol, upper=upper)
        self.assertEqual(result, reference)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_many_batches(self, device, dtype):
    """Stress cholesky_solve with very large batch counts on either operand."""
    shape_pairs = zip([(5, 256, 256), (5,)], [(5, 10), (512, 512, 5, 10)])
    for A_dims, b_dims in shape_pairs:
        for upper in (True, False):
            rhs, mat, chol = self.cholesky_solve_test_helper(A_dims, b_dims, upper, device, dtype)
            solution = torch.cholesky_solve(rhs, chol, upper)
            product = torch.matmul(mat, solution)
            # The RHS broadcasts against the batched product.
            self.assertEqual(product, rhs.expand_as(product))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_cholesky_solve_batched_broadcasting(self, device, dtype):
    """Broadcasting of A and b in cholesky_solve, checked against NumPy."""
    from numpy.linalg import solve
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    def run_test(A_dims, b_dims, upper):
        mat_size, batch_dims = A_dims[-1], A_dims[:-2]
        A = random_hermitian_pd_matrix(mat_size, *batch_dims,
                                       dtype=dtype, device='cpu')
        b = torch.randn(*b_dims, dtype=dtype, device='cpu')
        # NumPy broadcasts A against b, yielding the reference solution.
        x_exp = torch.tensor(solve(A.numpy(), b.numpy()), dtype=dtype, device=device)
        A, b = A.to(dtype=dtype, device=device), b.to(dtype=dtype, device=device)
        L = torch.linalg.cholesky(A, upper=upper)
        x = torch.cholesky_solve(b, L, upper=upper)
        self.assertEqual(x, x_exp)
        # https://github.com/pytorch/pytorch/issues/42695
        x = torch.cholesky_solve(b, L, upper=upper, out=x)
        self.assertEqual(x, x_exp)

    # test against numpy.linalg.solve
    for upper in (True, False):
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), upper)  # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6), upper)  # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2), upper)  # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), upper)  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float64, torch.complex128)
def test_cholesky_solve_autograd(self, device, dtype):
    """Gradcheck torch.cholesky_solve w.r.t. both the factor and the RHS."""
    def run_test(A_dims, B_dims, upper):
        # Leaf tensors so gradcheck can differentiate through them.
        root = torch.randn(*A_dims, device=device, dtype=dtype).requires_grad_()
        b = torch.randn(*B_dims, device=device, dtype=dtype).requires_grad_()

        def func(root, b, upper):
            # Keep only the triangle matching `upper`, so the input is a
            # valid triangular factor for cholesky_solve.
            if upper:
                A = root.triu()
            else:
                A = root.tril()
            return torch.cholesky_solve(b, A, upper)

        gradcheck(func, [root, b, upper])
        # TODO(#50743): the following fails with batched grad testing
        # TODO(#56235): disabling temporarily
        # gradgradcheck(func, [root, b, upper], atol=1e-3, check_batched_grad=False)

    # Single and batched factor/RHS shape combinations, both triangles.
    for (a_size, b_size), upper in itertools.product([((3, 3), (3, 4)), ((3, 3), (3, 2)),
                                                      ((2, 3, 3), (2, 3, 4)), ((2, 3, 3), (2, 3, 2))],
                                                     [True, False]):
        run_test(a_size, b_size, upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_solve_out_errors_and_warnings(self, device, dtype):
    """Error and warning behavior of the out= variant of cholesky_solve:
    unsafe out dtype, mismatched out device, and the resize warning."""
    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.cholesky_solve(b, a, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.cholesky_solve(b, a, out=out)

    # if out tensor with wrong shape is passed a warning is given
    with warnings.catch_warnings(record=True) as w:
        out = torch.empty(1, dtype=dtype, device=device)
        # Trigger warning
        torch.cholesky_solve(b, a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_inverse(self, device, dtype):
    """Checks for torch.inverse / torch.linalg.inv / torch.linalg.inv_ex:
    NumPy agreement, identity reconstruction, the out= variant, batched
    vs. per-matrix consistency, and non-contiguous inputs."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def run_test(torch_inverse, matrix, batches, n):
        matrix_inverse = torch_inverse(matrix)
        # Compare against NumPy output
        # NumPy uses 'gesv' LAPACK routine solving the equation A A_inv = I
        # But in PyTorch 'gertf' + 'getri' is used causing element-wise differences
        expected = np.linalg.inv(matrix.cpu().numpy())
        self.assertEqual(matrix_inverse, expected, atol=self.precision, rtol=self.precision)

        # Additional correctness tests, check matrix*matrix_inverse == identity
        identity = torch.eye(n, dtype=dtype, device=device)
        self.assertEqual(identity.expand_as(matrix), np.matmul(matrix.cpu(), matrix_inverse.cpu()))
        self.assertEqual(identity.expand_as(matrix), np.matmul(matrix_inverse.cpu(), matrix.cpu()))

        # check the out= variant
        # prepare the expected out tensor: column-major (Fortran-contiguous)
        # layout, built via a transpose round-trip
        matrix_inverse_out = torch.empty(*batches, n, n, dtype=dtype, device=device)
        matrix_inverse_out_t = matrix_inverse_out.mT.clone(memory_format=torch.contiguous_format)
        matrix_inverse_out = matrix_inverse_out_t.mT
        ans = torch_inverse(matrix, out=matrix_inverse_out)
        self.assertEqual(matrix_inverse_out, ans, atol=0, rtol=0)
        self.assertEqual(matrix_inverse_out, matrix_inverse, atol=0, rtol=0)

        # batched matrices: 3+ dimensional tensors, check matrix_inverse same as single-inverse for each matrix
        if matrix.ndim > 2 and batches[0] != 0:
            expected_inv_list = []
            p = int(np.prod(batches))  # use `p` instead of -1, so that the test works for empty input as well
            for mat in matrix.contiguous().view(p, n, n):
                expected_inv_list.append(torch_inverse(mat))
            expected_inv = torch.stack(expected_inv_list).view(*batches, n, n)
            if self.device_type == 'cuda' and dtype in [torch.float32, torch.complex64]:
                # single-inverse is done using cuSOLVER, while batched inverse is done using MAGMA
                # individual values can be significantly different for fp32, hence rather high rtol is used
                # the important thing is that torch_inverse passes above checks with identity
                self.assertEqual(matrix_inverse, expected_inv, atol=1e-1, rtol=1e-2)
            else:
                self.assertEqual(matrix_inverse, expected_inv)

    # helper function for testing torch.linalg.inv_ex
    def test_inv_ex(input, out=None):
        if out is not None:
            info = torch.empty(0, dtype=torch.int32, device=device)
            return torch.linalg.inv_ex(input, out=(out, info)).inverse
        return torch.linalg.inv_ex(input).inverse

    for torch_inverse in [torch.inverse, torch.linalg.inv, test_inv_ex]:
        for batches, n in itertools.product(
            [[], [0], [2], [2, 1]],
            [0, 5]
        ):
            matrices = make_arg(*batches, n, n)
            run_test(torch_inverse, matrices, batches, n)

            # test non-contiguous input
            run_test(torch_inverse, matrices.mT, batches, n)
            if n > 0:
                # strided view: every other row/column of a double-size matrix
                run_test(
                    torch_inverse,
                    make_arg(*batches, 2 * n, 2 * n)
                    .view(-1, n * 2, n * 2)[:, ::2, ::2].view(*batches, n, n),
                    batches, n
                )
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_ex_info_device(self, device, dtype):
    """The `info` tensor returned by linalg.inv_ex lives on the input's device."""
    identity = torch.eye(3, 3, dtype=dtype, device=device)
    info = torch.linalg.inv_ex(identity).info
    self.assertTrue(info.device == identity.device)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@skipCUDAIfRocm
def test_inv_ex_singular(self, device, dtype):
    """linalg.inv_ex flags singular input via `info`, and with
    check_errors=True raises LinAlgError naming the offending element/batch."""
    # if the input matrix is not invertible, info with positive integer is returned
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A[-1, -1] = 0  # Now A is singular
    info = torch.linalg.inv_ex(A).info
    self.assertEqual(info, 3)
    with self.assertRaisesRegex(torch.linalg.LinAlgError,
                                r'diagonal element 3 is zero, the inversion could not be completed'):
        torch.linalg.inv_ex(A, check_errors=True)

    # if at least one matrix in the batch is not positive definite,
    # batched info with positive integer for the corresponding matrix is returned
    A = torch.eye(3, 3, dtype=dtype, device=device)
    A = A.reshape((1, 3, 3))
    A = A.repeat(5, 1, 1)
    A[3, -2, -2] = 0  # Now A[3] is singular
    info = torch.linalg.inv_ex(A).info

    # info is per-batch; entry 3 reports the (1-indexed) zero diagonal element 2
    expected_info = torch.zeros(A.shape[:-2], dtype=torch.int32, device=device)
    expected_info[3] = 2
    self.assertEqual(info, expected_info)
    with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 3\): The diagonal element 2 is zero'):
        torch.linalg.inv_ex(A, check_errors=True)
@slowTest
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 2e-3, torch.complex64: 2e-3,
                    torch.float64: 1e-5, torch.complex128: 1e-5})
def test_inverse_many_batches(self, device, dtype):
    """Large-batch inversion agrees with NumPy for both inverse entry points."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_arg = partial(make_fullrank, device=device, dtype=dtype)

    def check(inverse_fn, batch, n):
        mats = make_arg(batch, n, n)
        inv = inverse_fn(mats)
        # Compare against NumPy output
        reference = np.linalg.inv(mats.cpu().numpy())
        self.assertEqual(inv, reference, atol=self.precision, rtol=1e-3)

    for inverse_fn in (torch.inverse, torch.linalg.inv):
        check(inverse_fn, 5, 256)
        check(inverse_fn, 3, 512)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes  # TODO: XLA doesn't raise exception
@dtypes(*floating_and_complex_types())
def test_inverse_errors(self, device, dtype):
    """torch.inverse rejects non-square input and names the first singular batch."""
    # inverse expects batches of square matrices as input
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.inverse(torch.randn(2, 3, 4, 3))

    def check_singular(batch_dim, n):
        # Batch of identities with the matrix at index `n` made singular.
        x = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
        x[n, -1, -1] = 0
        with self.assertRaisesRegex(torch.linalg.LinAlgError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
            torch.inverse(x)

    for batch_dim, n in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
        check_singular(batch_dim, n)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@onlyNativeDeviceTypes  # TODO: XLA doesn't raise exception
@skipCUDAIfRocm
@skipCUDAVersionIn([(11, 3), (11, 5)])  # https://github.com/pytorch/pytorch/issues/57482
@dtypes(*floating_and_complex_types())
def test_inverse_errors_large(self, device, dtype):
    # Test batched inverse of singular matrices reports errors without crashing (gh-51930)
    mats = torch.empty((8, 10, 616, 616), dtype=dtype, device=device)
    mats[:] = torch.eye(616, dtype=dtype, device=device)
    mats[..., 10, 10] = 0  # zero one diagonal entry in every matrix of the batch
    with self.assertRaisesRegex(torch.linalg.LinAlgError, r'\(Batch element 0\): The diagonal element 11 is zero'):
        torch.inverse(mats)
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3, torch.float64: 1e-7, torch.complex128: 1e-7})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinv(self, device, dtype):
    """linalg.pinv: Moore-Penrose identities, the out= variant, and NumPy
    agreement across float/tensor rcond values, for square/fat/thin/empty
    inputs and (separately) Hermitian inputs."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix

    def run_test_main(A, hermitian):
        # Testing against definition for pseudo-inverses
        A_pinv = torch.linalg.pinv(A, hermitian=hermitian)
        np_A = A.cpu().numpy()
        np_A_pinv = A_pinv.cpu().numpy()
        if A.numel() > 0:
            # The four Moore-Penrose conditions.
            self.assertEqual(A, np_A @ np_A_pinv @ np_A, atol=self.precision, rtol=self.precision)
            self.assertEqual(A_pinv, np_A_pinv @ np_A @ np_A_pinv, atol=self.precision, rtol=self.precision)
            self.assertEqual(np_A @ np_A_pinv, (np_A @ np_A_pinv).conj().swapaxes(-2, -1))
            self.assertEqual(np_A_pinv @ np_A, (np_A_pinv @ np_A).conj().swapaxes(-2, -1))
        else:
            # For empty input, the pseudo-inverse has the trailing dims swapped.
            self.assertEqual(A.shape, A_pinv.shape[:-2] + (A_pinv.shape[-1], A_pinv.shape[-2]))

        # Check out= variant
        out = torch.empty_like(A_pinv)
        ans = torch.linalg.pinv(A, hermitian=hermitian, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, A_pinv)

    def run_test_numpy(A, hermitian):
        # Check against NumPy output
        # Test float rcond, and specific value for each matrix
        rconds = [float(torch.rand(1)), ]
        # Test different types of rcond tensor
        for rcond_type in all_types():
            rconds.append(torch.rand(A.shape[:-2], dtype=torch.double, device=device).to(rcond_type))
        # Test broadcasting of rcond
        if A.ndim > 2:
            rconds.append(torch.rand(A.shape[-3], device=device))
        for rcond in rconds:
            actual = torch.linalg.pinv(A, rcond=rcond, hermitian=hermitian)
            # rcond and rtol are aliases; both spellings must agree.
            torch_rtol = torch.linalg.pinv(A, rtol=rcond, hermitian=hermitian)
            self.assertEqual(actual, torch_rtol)
            numpy_rcond = rcond if isinstance(rcond, float) else rcond.cpu().numpy()
            expected = np.linalg.pinv(A.cpu().numpy(), rcond=numpy_rcond, hermitian=hermitian)
            self.assertEqual(actual, expected, atol=self.precision, rtol=1e-5)

    for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                  (3, 2), (5, 3, 2), (2, 5, 3, 2),  # fat matrices
                  (2, 3), (5, 2, 3), (2, 5, 2, 3),  # thin matrices
                  (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
        A = torch.randn(*sizes, dtype=dtype, device=device)
        hermitian = False
        run_test_main(A, hermitian)
        run_test_numpy(A, hermitian)

    # Check hermitian = True
    for sizes in [(5, 5), (3, 5, 5), (3, 2, 5, 5),  # square matrices
                  (0, 0), (3, 0, 0), ]:  # zero numel square matrices
        A = random_hermitian_pd_matrix(sizes[-1], *sizes[:-2], dtype=dtype, device=device)
        hermitian = True
        run_test_main(A, hermitian)
        run_test_numpy(A, hermitian)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_pinv_errors_and_warnings(self, device, dtype):
    """Error and warning behavior of linalg.pinv: input rank, out= resize
    warning, unsafe out dtype, device mismatches, and complex tolerances."""
    # pinv requires at least 2D tensor
    a = torch.randn(1, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "expected a tensor with 2 or more dimensions"):
        torch.linalg.pinv(a)

    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.randn(3, 3, dtype=dtype, device=device)
    out = torch.empty(7, 7, dtype=dtype, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.pinv(a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes of out and input should be safely castable
    out = torch.empty_like(a).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.pinv(a, out=out)

    if torch.cuda.is_available():
        # device of out and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty_like(a).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected result and input tensors to be on the same device"):
            torch.linalg.pinv(a, out=out)

        # device of rcond and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        rcond = torch.full((), 1e-2, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.pinv(a, rcond=rcond)

    # rcond can't be complex
    rcond = torch.full((), 1j, device=device)
    with self.assertRaisesRegex(RuntimeError, "rcond tensor of complex type is not supported"):
        torch.linalg.pinv(a, rcond=rcond)

    # atol can't be complex
    atol = torch.full((), 1j, device=device)
    with self.assertRaisesRegex(RuntimeError, "atol tensor of complex type is not supported"):
        torch.linalg.pinv(a, atol=atol)

    # rtol can't be complex
    rtol = torch.full((), 1j, device=device)
    with self.assertRaisesRegex(RuntimeError, "rtol tensor of complex type is not supported"):
        torch.linalg.pinv(a, rtol=rtol)
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_inv_errors_and_warnings(self, device, dtype):
    """Error and warning behavior of linalg.inv: non-square input, rank,
    singular batches, unsafe out dtype, out device mismatch, and resize
    warnings (including a batched-column-major out with the wrong shape)."""
    # inv expects batches of square matrices as input
    a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.linalg.inv(a)

    # inv requires the input to be at least 2 dimensional tensor
    a = torch.randn(2, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
        torch.linalg.inv(a)

    # if input is not invertible, RuntimeError is raised mentioning the first non-invertible batch
    def run_test_singular_input(batch_dim, n):
        a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
        a[n, -1, -1] = 0
        with self.assertRaisesRegex(torch.linalg.LinAlgError, rf"\(Batch element {n}\): The diagonal element 3 is zero"):
            torch.linalg.inv(a)

    for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
        run_test_singular_input(*params)

    # dtypes should match
    a = torch.eye(2, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "got result with dtype Int"):
        torch.linalg.inv(a, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.inv(a, out=out)

    # if out tensor with wrong shape is passed a warning is given
    with warnings.catch_warnings(record=True) as w:
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(1, dtype=dtype, device=device)
        # Trigger warning
        torch.linalg.inv(a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # if out tensor in batched column major format but with wrong a warning is given
    with warnings.catch_warnings(record=True) as w:
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(3, 3, dtype=dtype, device=device)
        # build a column-major (transpose-contiguous) out of the wrong shape
        out = out.mT.clone(memory_format=torch.contiguous_format)
        out = out.mT
        self.assertTrue(out.mT.is_contiguous())
        # Trigger warning
        torch.linalg.inv(a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
def solve_test_helper(self, A_dims, b_dims, device, dtype):
    """Return a random RHS `b` and a random full-rank matrix `A` for the solve tests."""
    make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                     device=device, dtype=dtype)
    rhs = torch.randn(*b_dims, dtype=dtype, device=device)
    return rhs, make_A(*A_dims)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve(self, device, dtype):
    """linalg.solve: A x = b holds, NumPy agreement, and the out= variant
    (same-dtype, castable-dtype, and empty out tensors), for matrix and
    vector right-hand sides across batch shapes."""
    def run_test(n, batch, rhs):
        A_dims = (*batch, n, n)
        b_dims = (*batch, n, *rhs)
        b, A = self.solve_test_helper(A_dims, b_dims, device, dtype)

        # Correctness test
        x = torch.linalg.solve(A, b)
        if rhs == ():
            # vector RHS: add/remove the trailing matrix dimension for matmul
            Ax = np.matmul(A.cpu(), x.unsqueeze(-1).cpu())
            Ax.squeeze_(-1)
        else:
            Ax = np.matmul(A.cpu(), x.cpu())
        self.assertEqual(b.expand_as(Ax), Ax)

        # Check against NumPy
        expected = np.linalg.solve(A.cpu().numpy(), b.expand_as(x).cpu().numpy())
        self.assertEqual(x, expected)

        # Check out= variant
        out = torch.empty_like(x)
        ans = torch.linalg.solve(A, b, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(x, out)

        # Check out= variant with complex128 out tensor
        out = torch.empty_like(x).to(torch.complex128)
        ans = torch.linalg.solve(A, b, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(x.to(torch.complex128), out)

        # Check empty out
        out = torch.empty(0, dtype=dtype, device=device)
        ans = torch.linalg.solve(A, b, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(x, out)

    batches = [(), (0, ), (3, ), (2, 3)]
    ns = [0, 5, 32]
    nrhs = [(), (1, ), (5, )]
    for n, batch, rhs in itertools.product(ns, batches, nrhs):
        run_test(n, batch, rhs)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3})
def test_solve_batched_non_contiguous(self, device, dtype):
    """linalg.solve accepts permuted (non-contiguous) operands."""
    make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                     device=device, dtype=dtype)
    lhs = make_A(2, 2, 2).permute(1, 0, 2)
    rhs = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
    self.assertFalse(lhs.is_contiguous())
    self.assertFalse(rhs.is_contiguous())
    result = torch.linalg.solve(lhs, rhs)
    reference = np.linalg.solve(lhs.cpu().numpy(), rhs.cpu().numpy())
    self.assertEqual(result, reference)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_solve_errors_and_warnings(self, device, dtype):
    """Error and warning behavior of torch.linalg.solve.

    Covers: non-square input, incompatible shapes, singular batches,
    out= resize warnings (matrix and vector RHS), unsafe out dtype, and
    mismatched out device.
    """
    # solve expects batches of square matrices as input
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        a = torch.randn(2, 3, 4, 3, dtype=dtype, device=device)
        b = torch.randn(2, 3, 4, 1, dtype=dtype, device=device)
        torch.linalg.solve(a, b)

    # solve expects compatible shapes for A x = b
    with self.assertRaisesRegex(RuntimeError, "Incompatible matrix sizes"):
        a = torch.randn(2, 3, 3, 3, dtype=dtype, device=device)
        b = torch.randn(2, 3, 2, 1, dtype=dtype, device=device)
        torch.linalg.solve(a, b)

    # if input is not solvable, RuntimeError is raised mentioning the first non-solvable batch
    def run_test_singular_input(batch_dim, n):
        a = torch.eye(3, 3, dtype=dtype, device=device).reshape((1, 3, 3)).repeat(batch_dim, 1, 1)
        a[n, -1, -1] = 0
        b = torch.randn(batch_dim, 3, 1, dtype=dtype, device=device)
        with self.assertRaisesRegex(torch.linalg.LinAlgError, rf'\(Batch element {n}\): The diagonal element 3 is zero'):
            torch.linalg.solve(a, b)

    for params in [(1, 0), (2, 0), (2, 1), (4, 0), (4, 2), (10, 2)]:
        run_test_singular_input(*params)

    # if out tensor with wrong shape is passed a warning is given
    # matrix 'b' case
    with warnings.catch_warnings(record=True) as w:
        A = torch.eye(2, dtype=dtype, device=device).reshape((1, 2, 2)).repeat(2, 1, 1)
        b = torch.randn(2, 2, 2, dtype=dtype, device=device)
        out = torch.zeros(1, dtype=dtype, device=device)
        # Trigger warning
        torch.linalg.solve(A, b, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # if out tensor with wrong shape is passed a warning is given
    # vector 'b' case
    with warnings.catch_warnings(record=True) as w:
        A = torch.eye(2, dtype=dtype, device=device)
        b = torch.randn(2, dtype=dtype, device=device)
        out = torch.zeros(1, dtype=dtype, device=device)
        # Trigger warning
        torch.linalg.solve(A, b, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.solve(a, b, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        # NOTE(review): dropped `clone_a = torch.empty_like(a)` — it was
        # never referenced anywhere in this test.
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.solve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve(self, device, dtype):
    """Legacy torch.solve solutions satisfy A x = b."""
    for k, n in zip([2, 3, 5], [3, 5, 7]):
        rhs, mat = self.solve_test_helper((n, n), (n, k), device, dtype)
        solution = torch.solve(rhs, mat)[0]
        self.assertEqual(rhs, np.matmul(mat.cpu(), solution.cpu()))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve_batched(self, device, dtype):
    """Batched legacy torch.solve matches per-matrix solves and solves the system."""
    def solve_batch_helper(A_dims, b_dims):
        rhs, mat = self.solve_test_helper(A_dims, b_dims, device, dtype)
        # Solve every batch entry on its own, then stack for comparison.
        expected = torch.stack([torch.solve(rhs[i], mat[i])[0]
                                for i in range(b_dims[0])])
        actual = torch.solve(rhs, mat)[0]  # batched output
        self.assertEqual(expected, actual)  # batched == per-matrix
        product = np.matmul(mat.cpu(), actual.cpu())
        self.assertEqual(rhs, product)

    for batchsize in (1, 3, 4):
        solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve_batched_non_contiguous(self, device, dtype):
    """Legacy torch.solve handles permuted, non-contiguous operands."""
    from numpy.linalg import solve
    make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                     device=device, dtype=dtype)
    mat = make_A(2, 2, 2).permute(1, 0, 2)
    rhs = torch.randn(2, 2, 2, dtype=dtype, device=device).permute(2, 1, 0)
    solution, _ = torch.solve(rhs, mat)
    reference = solve(mat.cpu().numpy(), rhs.cpu().numpy())
    self.assertEqual(solution, reference)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve_batched_many_batches(self, device, dtype):
    """Stress legacy torch.solve with very large batch counts on either operand."""
    shape_pairs = zip([(256, 256, 5, 5), (3, 3)], [(5, 1), (512, 512, 3, 1)])
    for A_dims, b_dims in shape_pairs:
        rhs, mat = self.solve_test_helper(A_dims, b_dims, device, dtype)
        solution, _ = torch.solve(rhs, mat)
        product = torch.matmul(mat, solution)
        # The RHS broadcasts against the batched solution.
        self.assertEqual(product, rhs.expand_as(solution))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve_batched_broadcasting(self, device, dtype):
    """Broadcasting of A and b in legacy torch.solve, checked against NumPy."""
    from numpy.linalg import solve

    def run_test(A_dims, b_dims):
        n = A_dims[-1]
        batch = A_dims[:-2]
        rhs, mat = self.solve_test_helper(batch + (n, n), b_dims, device, dtype)
        solution, _ = torch.solve(rhs, mat)
        self.assertEqual(solution, solve(mat.cpu().numpy(), rhs.cpu().numpy()))

    # test against numpy.linalg.solve
    run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6))  # no broadcasting
    run_test((2, 1, 3, 4, 4), (4, 6))  # broadcasting b
    run_test((4, 4), (2, 1, 3, 4, 2))  # broadcasting A
    run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5))  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_old_solve_errors_and_warnings(self, device, dtype):
    """Legacy torch.solve out= errors: unsafe dtype for either output
    (solution or LU) and device mismatch for either output."""
    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    lu = torch.empty(0, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got solution with dtype Int"):
        torch.solve(b, a, out=(out, lu))

    out = torch.empty(0, dtype=dtype, device=device)
    lu = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got lu with dtype Int"):
        torch.solve(b, a, out=(out, lu))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        # solution output on the wrong device
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        lu = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.solve(b, a, out=(out, lu))
        # LU output on the wrong device
        out = torch.empty(0, dtype=dtype, device=device)
        lu = torch.empty_like(a).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.solve(b, a, out=(out, lu))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
def test_tensorsolve(self, device, dtype):
    """linalg.tensorsolve matches numpy.linalg.tensorsolve, incl. the out= variant."""
    def run_test(a_shape, dims):
        lhs = torch.randn(a_shape, dtype=dtype, device=device)
        rhs = torch.randn(a_shape[:2], dtype=dtype, device=device)
        result = torch.linalg.tensorsolve(lhs, rhs, dims=dims)
        reference = np.linalg.tensorsolve(lhs.cpu().numpy(), rhs.cpu().numpy(), axes=dims)
        self.assertEqual(result, reference)
        # check the out= variant
        out = torch.empty_like(result)
        ans = torch.linalg.tensorsolve(lhs, rhs, dims=dims, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    for a_shape, d in itertools.product([(2, 3, 6), (3, 4, 4, 3)], [None, (0, 2)]):
        run_test(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_tensorsolve_empty(self, device, dtype):
    # Check for empty inputs. NumPy does not work for these cases.
    lhs = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
    rhs = torch.empty(lhs.shape[:2], dtype=dtype, device=device)
    solution = torch.linalg.tensorsolve(lhs, rhs)
    # Contracting lhs with the full solution must reconstruct the RHS.
    self.assertEqual(torch.tensordot(lhs, solution, dims=len(solution.shape)), rhs)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@precisionOverride({torch.float: 1e-4, torch.cfloat: 1e-4})
def test_tensorsolve_non_contiguous(self, device, dtype):
    """linalg.tensorsolve on non-contiguous inputs: permuted/transposed
    operands, stride-skipped operands, and a non-contiguous out= tensor."""
    def run_test_permuted(a_shape, dims):
        # check for permuted / transposed inputs
        a = torch.randn(a_shape, dtype=dtype, device=device)
        a = a.movedim((0, 2), (-2, -1))
        self.assertFalse(a.is_contiguous())
        b = torch.randn(a.shape[:2], dtype=dtype, device=device)
        b = b.t()
        self.assertFalse(b.is_contiguous())
        result = torch.linalg.tensorsolve(a, b, dims=dims)
        expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
        self.assertEqual(result, expected)

    def run_test_skipped_elements(a_shape, dims):
        # check for inputs with skipped elements
        a = torch.randn(a_shape, dtype=dtype, device=device)
        a = a[::2]  # stride-2 slice makes the tensor non-contiguous
        self.assertFalse(a.is_contiguous())
        b = torch.randn(a_shape[:2], dtype=dtype, device=device)
        b = b[::2]
        self.assertFalse(b.is_contiguous())
        result = torch.linalg.tensorsolve(a, b, dims=dims)
        expected = np.linalg.tensorsolve(a.cpu().numpy(), b.cpu().numpy(), axes=dims)
        self.assertEqual(result, expected)

        # check non-contiguous out
        out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
        self.assertFalse(out.is_contiguous())
        ans = torch.linalg.tensorsolve(a, b, dims=dims, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    a_shapes = [(2, 3, 6), (3, 4, 4, 3)]
    dims = [None, (0, 2)]
    for a_shape, d in itertools.product(a_shapes, dims):
        run_test_permuted(a_shape, d)

    # even leading dims so the `[::2]` slice halves them cleanly
    a_shapes = [(4, 3, 6), (6, 4, 4, 3)]
    dims = [None, (0, 2)]
    for a_shape, d in itertools.product(a_shapes, dims):
        run_test_skipped_elements(a_shape, d)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32)
def test_tensorsolve_errors_and_warnings(self, device, dtype):
    """Error and warning behavior of linalg.tensorsolve: non-square
    reshaped input, out= resize warning, unsafe out dtype, out device."""
    # tensorsolve expects the input that can be reshaped to a square matrix
    a = torch.eye(2 * 3 * 4, dtype=dtype, device=device).reshape((2 * 3, 4, 2, 3, 4))
    b = torch.randn(8, 4, dtype=dtype, device=device)
    self.assertTrue(np.prod(a.shape[2:]) != np.prod(b.shape))
    with self.assertRaisesRegex(RuntimeError, r'Expected self to satisfy the requirement'):
        torch.linalg.tensorsolve(a, b)

    # if non-empty out tensor with wrong shape is passed a warning is given
    out = torch.empty_like(a)
    b = torch.randn(6, 4, dtype=dtype, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.tensorsolve(a, b, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))

    # dtypes should be safely castable
    out = torch.empty_like(a).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.tensorsolve(a, b, out=out)

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.linalg.tensorsolve(a, b, out=out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
def test_tensorinv(self, device, dtype):
    """linalg.tensorinv matches numpy.linalg.tensorinv, incl. the out= variant."""
    def run_test(a_shape, ind):
        src = torch.randn(a_shape, dtype=dtype, device=device)
        src_numpy = src.cpu().numpy()
        result = torch.linalg.tensorinv(src, ind=ind)
        # compare to NumPy output
        reference = np.linalg.tensorinv(src_numpy, ind=ind)
        self.assertEqual(result, reference)
        # check the out= variant
        out = torch.empty_like(result)
        ans = torch.linalg.tensorinv(src, ind=ind, out=out)
        self.assertEqual(ans, out)
        self.assertEqual(ans, result)

    cases = [((12, 3, 4), 1), ((3, 8, 24), 2), ((18, 3, 3, 2), 1),
             ((1, 4, 2, 2), 2), ((2, 3, 5, 30), 3), ((24, 2, 2, 3, 2), 1),
             ((3, 4, 2, 3, 2), 2), ((1, 2, 3, 2, 3), 3), ((3, 2, 1, 2, 12), 4)]
    for a_shape, ind in cases:
        run_test(a_shape, ind=ind)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float: 1e-3, torch.cfloat: 1e-3})
    def test_tensorinv(self, device, dtype):
        """tensorinv on non-contiguous inputs (permuted dims, strided
        slices) and non-contiguous out= tensors, checked against NumPy."""
        def run_test(a_shape, ind):
            # check for permuted (transposed) case
            a = torch.randn(a_shape, dtype=dtype, device=device)
            permutation = list(range(0, a.ndim))
            # rotate the dims so the "matrix" split point moves to a.ndim - ind
            a = a.permute(permutation[ind:] + permutation[:ind])
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=a.ndim - ind)
            expected = np.linalg.tensorinv(a_numpy, ind=a.ndim - ind)
            self.assertEqual(result, expected)
        def run_test_skipped_elements(a_shape, ind):
            # check for input with skipped elements
            a = torch.randn(a_shape, dtype=dtype, device=device)
            a = a[::2]
            self.assertFalse(a.is_contiguous())
            a_numpy = a.cpu().numpy()
            result = torch.linalg.tensorinv(a, ind=ind)
            expected = np.linalg.tensorinv(a_numpy, ind=ind)
            self.assertEqual(result, expected)
            # check non-contiguous out
            out = torch.empty(2 * result.shape[0], *result.shape[1:], dtype=dtype, device=device)[::2]
            self.assertFalse(out.is_contiguous())
            ans = torch.linalg.tensorinv(a, ind=ind, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, result)
        run_test((12, 3, 4), ind=1)
        run_test((3, 8, 24), ind=2)
        run_test((18, 3, 3, 2), ind=1)
        run_test((1, 4, 2, 2), ind=2)
        run_test((2, 3, 5, 30), ind=3)
        run_test((24, 2, 2, 3, 2), ind=1)
        run_test((3, 4, 2, 3, 2), ind=2)
        run_test((1, 2, 3, 2, 3), ind=3)
        run_test((3, 2, 1, 2, 12), ind=4)
        # strided inputs need shapes whose leading dim survives a [::2] slice
        run_test_skipped_elements((12, 3, 2), ind=1)
        run_test_skipped_elements((18, 3, 3, 1), ind=1)
@skipMeta # See https://github.com/pytorch/pytorch/issues/53739
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_empty(self, device, dtype):
for ind in range(1, 4):
# Check for empty inputs. NumPy does not work for these cases.
a = torch.empty(0, 0, 1, 2, 3, 0, dtype=dtype, device=device)
a_inv = torch.linalg.tensorinv(a, ind=ind)
self.assertEqual(a_inv.shape, a.shape[ind:] + a.shape[:ind])
    @skipMeta # See https://github.com/pytorch/pytorch/issues/53739
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_tensorinv_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of tensorinv: invalid shapes,
        invalid `ind`, and bad out= tensors (shape, dtype, device)."""
        def check_shape(a_shape, ind):
            # tensorinv requires the input to satisfy
            # prod(a.shape[ind:]) == prod(a.shape[:ind])
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected self to satisfy the requirement"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_ind(a_shape, ind):
            # ind must be a strictly positive integer
            a = torch.randn(a_shape, dtype=dtype, device=device)
            with self.assertRaisesRegex(RuntimeError, "Expected a strictly positive integer"):
                torch.linalg.tensorinv(a, ind=ind)
        def check_out(a_shape, ind):
            # if non-empty out tensor with wrong shape is passed a warning is given
            a = torch.randn(a_shape, dtype=dtype, device=device)
            out = torch.empty_like(a)
            with warnings.catch_warnings(record=True) as w:
                # Trigger warning
                torch.linalg.tensorinv(a, ind=ind, out=out)
                # Check warning occurs
                self.assertEqual(len(w), 1)
                self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
            # dtypes should be safely castable
            out = torch.empty(0, dtype=torch.int, device=device)
            with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
                torch.linalg.tensorinv(a, ind=ind, out=out)
            # device should match
            if torch.cuda.is_available():
                wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
                out = torch.empty(0, dtype=dtype, device=wrong_device)
                with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                    torch.linalg.tensorinv(a, ind=ind, out=out)
        # test for invalid shape
        check_shape((2, 3, 4), ind=1)
        check_shape((1, 2, 3, 4), ind=3)
        # test for invalid ind
        check_ind((12, 3, 4), ind=-1)
        check_ind((18, 3, 3, 2), ind=0)
        # test for invalid out tensor
        check_out((12, 3, 4), ind=1)
        check_out((3, 8, 24), ind=2)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_tensorinv_singular_input(self, device, dtype):
def check_singular_input(a_shape, ind):
prod_ind_end = np.prod(a_shape[ind:])
a = torch.eye(prod_ind_end, dtype=dtype, device=device)
a[-1, -1] = 0 # Now `a` is singular
a = a.reshape(a_shape)
with self.assertRaisesRegex(torch.linalg.LinAlgError, "The diagonal element"):
torch.linalg.tensorinv(a, ind=ind)
# test for non-invertible input
check_singular_input((12, 3, 4), ind=1)
check_singular_input((3, 6, 18), ind=2)
    def _test_dot_vdot_vs_numpy(self, device, dtype, torch_fn, np_fn):
        """Shared driver comparing torch_fn (dot/vdot) against np_fn on
        empty, contiguous, 0-strided and 2-strided 1D inputs, including
        the out= variant."""
        def check(x, y):
            # Compare with numpy
            res = torch_fn(x, y)
            if x.dtype == torch.bfloat16:
                # NumPy has no bfloat16; compute the reference in float32
                ref = torch.from_numpy(np.array(np_fn(x.cpu().float().numpy(), y.cpu().float().numpy())))
            else:
                ref = torch.from_numpy(np.array(np_fn(x.cpu().numpy(), y.cpu().numpy())))
            if res.dtype == torch.bfloat16:
                self.assertEqual(res.cpu(), ref.bfloat16())
            else:
                self.assertEqual(res.cpu(), ref)
            # Test out variant
            out = torch.empty_like(res)
            torch_fn(x, y, out=out)
            self.assertEqual(out, res)
        # Empty
        x = torch.tensor([], dtype=dtype, device=device)
        y = torch.tensor([], dtype=dtype, device=device)
        check(x, y)
        # Contiguous
        x = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        y = 0.1 * torch.randn(5000, dtype=dtype, device=device)
        check(x, y)
        # 0 strided
        y = 0.1 * torch.randn(1, dtype=dtype, device=device).expand(5000)
        check(x, y)
        # 2 strided
        check(x[::2], y[::2])
    @dtypes(torch.float, torch.cfloat, torch.bfloat16)
    @dtypesIfCUDA(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5, torch.bfloat16: 1e-0})
    def test_dot_vs_numpy(self, device, dtype):
        """Compare torch.dot with np.dot via _test_dot_vdot_vs_numpy."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.dot, np.dot)
    @dtypes(torch.float, torch.cfloat)
    @precisionOverride({torch.cfloat: 1e-4, torch.float32: 5e-5})
    def test_vdot_vs_numpy(self, device, dtype):
        """Compare torch.vdot with np.vdot via _test_dot_vdot_vs_numpy."""
        self._test_dot_vdot_vs_numpy(device, dtype, torch.vdot, np.vdot)
def _test_dot_vdot_invalid_args(self, device, torch_fn, complex_dtypes=False):
def check(x, y, regex):
with self.assertRaisesRegex(RuntimeError, regex):
torch_fn(x, y)
if complex_dtypes:
x = torch.randn(1, dtype=torch.cfloat, device=device)
y = torch.randn(3, dtype=torch.cdouble, device=device)
else:
x = torch.randn(1, dtype=torch.float, device=device)
y = torch.randn(3, dtype=torch.double, device=device)
check(x, y, 'dot : expected both vectors to have same dtype')
check(x.reshape(1, 1), y, '1D tensors expected')
check(x.expand(9), y.to(x.dtype), 'inconsistent tensor size')
if self.device_type != 'cpu':
x_cpu = x.expand(3).cpu()
check(x_cpu, y.to(x.dtype), 'Expected all tensors to be on the same device')
    @onlyNativeDeviceTypes
    def test_vdot_invalid_args(self, device):
        """torch.vdot must reject mismatched dtype/shape/device arguments."""
        self._test_dot_vdot_invalid_args(device, torch.vdot)
        self._test_dot_vdot_invalid_args(device, torch.vdot, complex_dtypes=True)
    @onlyNativeDeviceTypes
    def test_dot_invalid_args(self, device):
        """torch.dot must reject mismatched dtype/shape/device arguments."""
        self._test_dot_vdot_invalid_args(device, torch.dot)
        self._test_dot_vdot_invalid_args(device, torch.dot, complex_dtypes=True)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank(self, device, dtype):
        """torch.linalg.matrix_rank: invariance under conjugate transpose,
        hermitian=True agreement on A A^H / A^H A, comparison with NumPy,
        and the out= variant."""
        matrix_rank = torch.linalg.matrix_rank
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # rank is invariant under conjugate transpose
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            # check against NumPy
            self.assertEqual(rank_a, np.linalg.matrix_rank(a.cpu().numpy()))
            self.assertEqual(matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
            self.assertEqual(rank_aaH, np.linalg.matrix_rank(aaH.cpu().numpy()))
            self.assertEqual(matrix_rank(aaH, 0.01), np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01))
            # hermitian flag for NumPy was added in 1.14.0
            if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
                self.assertEqual(rank_aaH_hermitian,
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), hermitian=True))
                self.assertEqual(matrix_rank(aaH, 0.01, True),
                                 np.linalg.matrix_rank(aaH.cpu().numpy(), 0.01, True))
            # check out= variant
            out = torch.empty(a.shape[:-2], dtype=torch.int64, device=device)
            ans = matrix_rank(a, out=out)
            self.assertEqual(ans, out)
            self.assertEqual(ans, rank_a)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_atol(self, device, dtype):
        """matrix_rank with atol=: float, per-matrix tensor, and
        broadcastable-tensor tolerances must agree with the legacy tol=
        argument and with NumPy."""
        def run_test_atol(shape0, shape1, batch):
            a = make_tensor((*batch, shape0, shape1), dtype=dtype, device=device)
            # Check against NumPy output
            # Test float tol, and specific value for each matrix
            tolerances = [float(torch.rand(1)), ]
            # Test different types of tol tensor
            for tol_type in all_types():
                tolerances.append(make_tensor(a.shape[:-2], dtype=tol_type, device=device, low=0))
            # Test broadcasting of tol
            if a.ndim > 2:
                tolerances.append(make_tensor(a.shape[-3], dtype=torch.float32, device=device, low=0))
            for tol in tolerances:
                actual = torch.linalg.matrix_rank(a, atol=tol)
                # legacy tol= must behave identically to atol=
                actual_tol = torch.linalg.matrix_rank(a, tol=tol)
                self.assertEqual(actual, actual_tol)
                numpy_tol = tol if isinstance(tol, float) else tol.cpu().numpy()
                expected = np.linalg.matrix_rank(a.cpu().numpy(), tol=numpy_tol)
                self.assertEqual(actual, expected)
        shapes = (3, 13)
        batches = ((), (0, ), (4, ), (3, 5, ))
        for (shape0, shape1), batch in zip(itertools.product(shapes, reversed(shapes)), batches):
            run_test_atol(shape0, shape1, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float64)
    def test_matrix_rank_atol_rtol(self, device, dtype):
        """Semantics of atol vs rtol in matrix_rank: rtol is scaled by the
        largest singular value, atol is absolute, and when both are given
        the effective threshold is their maximum."""
        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
        make_arg = partial(make_fullrank, device=device, dtype=dtype)
        # make_arg produces an n x n full-rank matrix whose singular values
        # are 1 + 1/2, 1 - 1/3, 1 + 1/4, 1 - 1/5, ... (all within [2/3, 3/2])
        n = 9
        a = make_arg(n, n)
        # test float and tensor variants
        for tol_value in [0.81, torch.tensor(0.81, device=device)]:
            # using rtol (relative tolerance) takes into account the largest singular value (1.5 in this case)
            result = torch.linalg.matrix_rank(a, rtol=tol_value)
            self.assertEqual(result, 2)  # there are 2 singular values above 1.5*0.81 = 1.215
            # atol is used directly to compare with singular values
            result = torch.linalg.matrix_rank(a, atol=tol_value)
            self.assertEqual(result, 7)  # there are 7 singular values above 0.81
            # when both are specified the maximum tolerance is used
            result = torch.linalg.matrix_rank(a, atol=tol_value, rtol=tol_value)
            self.assertEqual(result, 2)  # there are 2 singular values above max(0.81, 1.5*0.81)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_empty(self, device, dtype):
        """matrix_rank on matrices with a zero-sized dimension: the rank
        must be 0 everywhere (NumPy cannot be used as a reference here)."""
        matrix_rank = torch.linalg.matrix_rank
        # NumPy doesn't work for input with no elements
        def run_test(shape0, shape1, batch):
            a = torch.randn(*batch, shape0, shape1, dtype=dtype, device=device)
            rank_a = matrix_rank(a)
            # an empty matrix has rank 0 for every element of the batch
            expected = torch.zeros(batch, dtype=torch.int64, device=device)
            self.assertEqual(rank_a, matrix_rank(a.mH))
            aaH = torch.matmul(a, a.mH)
            rank_aaH = matrix_rank(aaH)
            rank_aaH_hermitian = matrix_rank(aaH, hermitian=True)
            self.assertEqual(rank_aaH, rank_aaH_hermitian)
            aHa = torch.matmul(a.mH, a)
            self.assertEqual(matrix_rank(aHa), matrix_rank(aHa, hermitian=True))
            self.assertEqual(rank_a, expected)
            self.assertEqual(matrix_rank(a, 0.01), expected)
            self.assertEqual(rank_aaH, expected)
            self.assertEqual(matrix_rank(aaH, 0.01), expected)
            self.assertEqual(rank_aaH_hermitian, expected)
            self.assertEqual(matrix_rank(aaH, 0.01, True), expected)
        batches = ((), (4, ), (3, 5, ))
        for batch in batches:
            run_test(0, 0, batch)
            run_test(0, 3, batch)
            run_test(3, 0, batch)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_matrix_rank_out_errors_and_warnings(self, device, dtype):
        """out= validation for matrix_rank: non-castable dtype and wrong
        device raise; a wrong-shaped out tensor only warns (and resizes)."""
        # dtypes should be safely castable
        a = torch.eye(2, dtype=dtype, device=device)
        out = torch.empty(0, dtype=torch.bool, device=device)
        with self.assertRaisesRegex(RuntimeError, "but got result with dtype Bool"):
            torch.linalg.matrix_rank(a, out=out)
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            out = torch.empty(0, dtype=dtype, device=wrong_device)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.matrix_rank(a, out=out)
        # if out tensor with wrong shape is passed a warning is given
        with warnings.catch_warnings(record=True) as w:
            out = torch.empty(3, dtype=dtype, device=device)
            # Trigger warning
            torch.linalg.matrix_rank(a, out=out)
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_matrix_rank_basic(self, device, dtype):
matrix_rank = torch.linalg.matrix_rank
a = torch.eye(10, dtype=dtype, device=device)
self.assertEqual(matrix_rank(a).item(), 10)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 10)
a[5, 5] = 0
self.assertEqual(matrix_rank(a).item(), 9)
self.assertEqual(matrix_rank(a, hermitian=True).item(), 9)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_old_matrix_rank(self, device, dtype):
        """Legacy torch.matrix_rank API: identity with a zeroed pivot,
        transpose invariance, symmetric=True agreement on A A^H / A^H A,
        and comparison with NumPy."""
        a = torch.eye(10, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a).item(), 10)
        self.assertEqual(torch.matrix_rank(a, True).item(), 10)
        a[5, 5] = 0
        self.assertEqual(torch.matrix_rank(a).item(), 9)
        self.assertEqual(torch.matrix_rank(a, True).item(), 9)
        a = torch.randn(24, 42, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a), torch.matrix_rank(a.t()))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), torch.matrix_rank(aaT, True))
        aTa = torch.mm(a.conj().t(), a)
        self.assertEqual(torch.matrix_rank(aTa), torch.matrix_rank(aTa, True))
        a = torch.randn(35, 75, dtype=dtype, device=device)
        self.assertEqual(torch.matrix_rank(a), np.linalg.matrix_rank(a.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(a, 0.01), np.linalg.matrix_rank(a.cpu().numpy(), 0.01))
        aaT = torch.mm(a, a.conj().t())
        self.assertEqual(torch.matrix_rank(aaT), np.linalg.matrix_rank(aaT.cpu().numpy()))
        self.assertEqual(torch.matrix_rank(aaT, 0.01), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01))
        # NumPy's hermitian/symmetric flag was added in 1.14.0
        if np.lib.NumpyVersion(np.__version__) >= '1.14.0':
            self.assertEqual(torch.matrix_rank(aaT, True), np.linalg.matrix_rank(aaT.cpu().numpy(), True))
            self.assertEqual(torch.matrix_rank(aaT, 0.01, True), np.linalg.matrix_rank(aaT.cpu().numpy(), 0.01, True))
    @onlyNativeDeviceTypes
    @dtypes(torch.double)
    # This tests only the cases where torch.chain_matmul differs from torch.linalg.multi_dot which this is an "alias" for.
    def test_chain_matmul(self, device, dtype):
        """Behaviors of torch.chain_matmul not shared with multi_dot:
        single-tensor input, empty argument list, and 2D-only inputs."""
        # chain_matmul accepts a single input tensor while multi_dot does not
        t = make_tensor((2, 2), device, dtype)
        self.assertEqual(t, torch.chain_matmul(t))
        with self.assertRaisesRegex(RuntimeError, r"chain_matmul\(\): Expected one or more matrices"):
            torch.chain_matmul()
        # chain_matmul expects all tensors to be 2D whereas multi_dot allows the first and last tensors to
        # be either 1D or 2D
        with self.assertRaisesRegex(RuntimeError, r"Tensor dimension is 1, expected 2 instead"):
            torch.chain_matmul(make_tensor(1, device, dtype), make_tensor(1, device, dtype))
    @onlyNativeDeviceTypes
    @dtypes(torch.double, torch.cdouble)
    def test_multi_dot(self, device, dtype):
        """torch.linalg.multi_dot vs np.linalg.multi_dot over empty
        dimensions, 1D/2D first-and-last operands, long chains, large
        tensors, and noncontiguous inputs."""
        def check(*shapes, noncontiguous=False):
            tensors = [make_tensor(shape, device, dtype, noncontiguous=noncontiguous) for shape in shapes]
            np_arrays = [tensor.cpu().numpy() for tensor in tensors]
            res = torch.linalg.multi_dot(tensors).cpu()
            ref = torch.from_numpy(np.array(np.linalg.multi_dot(np_arrays)))
            self.assertEqual(res, ref)
        # test for inputs with empty dimensions
        check([0], [0])
        check([2], [2, 0])
        check([1, 0], [0])
        check([0, 2], [2, 1])
        check([2, 2], [2, 0])
        check([2, 0], [0, 3])
        check([0, 0], [0, 1])
        check([4, 2], [2, 0], [0, 3], [3, 2])
        # test variable output shapes
        check([2], [2])
        check([1, 2], [2])
        check([2], [2, 1])
        check([1, 2], [2, 1])
        check([3, 2], [2, 4])
        # test multiple input tensors
        check([3], [3, 4], [4, 2], [2, 5], [5])
        check([1, 2], [2, 2], [2, 3], [3, 1])
        # test large tensors
        check([10, 100], [100, 5], [5, 50])
        check([10, 20], [20, 30], [30, 5])
        # test noncontiguous input
        check([3, 2], [2, 2], [2, 3], [3, 4], noncontiguous=True)
        check([15, 5], [5, 10], [10, 20], [20, 25], noncontiguous=True)
    @onlyNativeDeviceTypes
    @dtypes(torch.float)
    def test_multi_dot_errors(self, device, dtype):
        """Argument validation for multi_dot: operand count, dimensionality,
        dtype/device agreement, out= validation, and shape compatibility."""
        def check(tensors, out, msg):
            with self.assertRaisesRegex(RuntimeError, msg):
                torch.linalg.multi_dot(tensors, out=out)
        a = make_tensor(2, device, dtype)
        check([], None, "expected at least 2 tensors")
        check([a], None, "expected at least 2 tensors")
        check([torch.tensor(1, device=device, dtype=dtype), a], None, "the first tensor must be 1D or 2D")
        check([a, torch.tensor(1, device=device, dtype=dtype)], None, "the last tensor must be 1D or 2D")
        # only the first and last operands may be 1D
        check([a, a, a], None, "tensor 1 must be 2D")
        check([a, make_tensor((2, 2, 2), device, dtype), a], None, "tensor 1 must be 2D")
        check([a, make_tensor(2, device, torch.double)], None, "all tensors must have be the same dtype")
        check([a, a], torch.empty(0, device=device, dtype=torch.double), "expected out tensor to have dtype")
        if self.device_type == 'cuda':
            check([a, make_tensor(2, 'cpu', dtype)], None, "all tensors must be on the same device")
            check([a, a], torch.empty(0, dtype=dtype), "expected out tensor to be on device")
        check([a, make_tensor(3, device, dtype)], None, "cannot be multiplied")
        check([a, make_tensor((3, 2), device, dtype), a], None, "cannot be multiplied")
    @precisionOverride({torch.float32: 5e-6, torch.complex64: 5e-6})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_qr(self, device, dtype):
        """Legacy torch.qr: output shapes, A == QR reconstruction, the out=
        variant, orthonormality of Q and upper-triangularity of R, over
        empty, single, and batched inputs."""
        def run_test(tensor_dims, some):
            A = torch.randn(*tensor_dims, dtype=dtype, device=device)
            Q, R = torch.qr(A, some=some)
            # Check0: Q[-2:] = (m, n_columns), R[-2:] = (n_columns, n)
            m, n = tensor_dims[-2:]
            n_columns = m if (not some) and m > n else min(m, n)
            self.assertEqual(Q.size(-2), m)
            self.assertEqual(R.size(-1), n)
            self.assertEqual(Q.size(-1), n_columns)
            A_ = A.cpu().numpy()
            Q_ = Q.cpu().numpy()
            R_ = R.cpu().numpy()
            # Check1: A = QR
            self.assertEqual(A_, np.matmul(Q_, R_))
            # Check2: A = QR (with out)
            Q_out, R_out = torch.full_like(Q, math.nan), torch.full_like(R, math.nan)
            torch.qr(A, some=some, out=(Q_out, R_out))
            Q_out_ = Q_out.cpu().numpy()
            R_out_ = R_out.cpu().numpy()
            self.assertEqual(A_, np.matmul(Q_out_, R_out_))
            # Check3: Q == Q_out, R == R_out
            self.assertEqual(Q_, Q_out_)
            self.assertEqual(R_, R_out_)
            # Check4: Q^{T}Q = I, triu(R) = R
            eye = torch.eye(n_columns, device=device, dtype=dtype).expand(Q.shape[:-2] + (n_columns, n_columns)).cpu().numpy()
            self.assertEqual(np.matmul(Q_.swapaxes(-1, -2).conj(), Q_), eye)
            self.assertEqual(R.triu(), R)
        tensor_dims_list = [(0, 5), (0, 0), (5, 0),  # Empty Tensors
                            (2, 1, 0, 5), (2, 1, 0, 0), (2, 1, 5, 0), (2, 0, 5, 5),  # Batched empty Tensors
                            (3, 5), (5, 5), (5, 3),  # Single matrix
                            (7, 3, 5), (7, 5, 5), (7, 5, 3),  # 3-dim Tensors
                            (7, 5, 3, 5), (7, 5, 5, 5), (7, 5, 5, 3)]  # 4-dim Tensors
        for tensor_dims, some in itertools.product(tensor_dims_list, [True, False]):
            run_test(tensor_dims, some)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_vs_numpy(self, device, dtype):
"""
test torch.linalg.qr vs numpy.linalg.qr
"""
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0), # empty
(0, 5), # empty
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete']:
exp_q, exp_r = np.linalg.qr(np_t, mode=mode)
q, r = torch.linalg.qr(t, mode=mode)
self.assertEqual(q, exp_q)
self.assertEqual(r, exp_r)
#
# for mode='r' we need a special logic because numpy returns only r
exp_r = np.linalg.qr(np_t, mode='r')
q, r = torch.linalg.qr(t, mode='r')
# check that q is empty
self.assertEqual(q.shape, (0,))
self.assertEqual(q.dtype, t.dtype)
self.assertEqual(q.device, t.device)
# check r
self.assertEqual(r, exp_r)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float)
    def test_linalg_qr_autograd_errors(self, device, dtype):
        """Backward through qr must raise clean errors in the unsupported
        configurations (mode='r', and mode='complete' with more rows than
        columns)."""
        # torch.linalg.qr(mode='r') returns only 'r' and discards 'q', but
        # without 'q' you cannot compute the backward pass. Check that
        # linalg_qr_backward complains cleanly in that case.
        inp = torch.randn((5, 7), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='r')
        self.assertEqual(q.shape, (0,))  # empty tensor
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='r'"):
            b.backward()
        #
        inp = torch.randn((7, 5), device=device, dtype=dtype, requires_grad=True)
        q, r = torch.linalg.qr(inp, mode='complete')
        b = torch.sum(r)
        with self.assertRaisesRegex(RuntimeError,
                                    "The derivative of qr is not implemented when mode='complete' and nrows > ncols"):
            b.backward()
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_qr_batched(self, device, dtype):
        """
        test torch.linalg.qr vs numpy.linalg.qr. We need some special logic
        because numpy does not support batched qr
        """
        def np_qr_batched(a, mode):
            """poor's man batched version of np.linalg.qr"""
            all_q = []
            all_r = []
            # apply np.linalg.qr matrix-by-matrix along the batch dimension
            for matrix in a:
                result = np.linalg.qr(matrix, mode=mode)
                if mode == 'r':
                    # in mode='r' numpy returns only the r matrix
                    all_r.append(result)
                else:
                    q, r = result
                    all_q.append(q)
                    all_r.append(r)
            if mode == 'r':
                return np.array(all_r)
            else:
                return np.array(all_q), np.array(all_r)
        t = torch.randn((3, 7, 5), device=device, dtype=dtype)
        np_t = t.cpu().numpy()
        for mode in ['reduced', 'complete']:
            exp_q, exp_r = np_qr_batched(np_t, mode=mode)
            q, r = torch.linalg.qr(t, mode=mode)
            self.assertEqual(q, exp_q)
            self.assertEqual(r, exp_r)
        # for mode='r' we need a special logic because numpy returns only r
        exp_r = np_qr_batched(np_t, mode='r')
        q, r = torch.linalg.qr(t, mode='r')
        # check that q is empty
        self.assertEqual(q.shape, (0,))
        self.assertEqual(q.dtype, t.dtype)
        self.assertEqual(q.device, t.device)
        # check r
        self.assertEqual(r, exp_r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_qr_out(self, device, dtype):
"""
test torch.linalg.qr(out=...) vs torch.lingalg.qr
"""
sizes_to_test = [
(7, 5),
(5, 7),
(5, 0), # empty
(0, 5), # empty
]
for size in sizes_to_test:
t = torch.randn(size, device=device, dtype=dtype)
np_t = t.cpu().numpy()
for mode in ['reduced', 'complete', 'r']:
q, r = torch.linalg.qr(t, mode=mode)
out = (torch.empty((0), dtype=dtype, device=device),
torch.empty((0), dtype=dtype, device=device))
q2, r2 = torch.linalg.qr(t, mode=mode, out=out)
self.assertIs(q2, out[0])
self.assertIs(r2, out[1])
self.assertEqual(q2, q)
self.assertEqual(r2, r)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float)
def test_qr_error_cases(self, device, dtype):
t1 = torch.randn(5, device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, 'qr input should have at least 2 dimensions, but has 1 dimensions instead'):
torch.linalg.qr(t1)
t2 = torch.randn((5, 7), device=device, dtype=dtype)
with self.assertRaisesRegex(RuntimeError, "qr received unrecognized mode 'hello'"):
torch.linalg.qr(t2, mode='hello')
def _check_einsum(self, *args, np_args=None):
if np_args is None:
np_args = [arg.cpu().numpy() if isinstance(arg, torch.Tensor) else arg for arg in args]
res = torch.einsum(*args)
ref = np.einsum(*np_args)
self.assertEqual(torch.from_numpy(np.array(ref)), res)
    @dtypes(torch.double, torch.cdouble)
    def test_einsum(self, device, dtype):
        """torch.einsum vs np.einsum over vector, matrix, tensor, diagonal,
        ellipsis, and noncontiguous/strided cases."""
        # Test cases from https://gist.github.com/rockt/15ee013889d65342088e9260a377dc8f
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 3, 5), device, dtype)
        D = make_tensor((2, 5, 7), device, dtype)
        E = make_tensor((7, 9), device, dtype)
        F = make_tensor((2, 3, 3, 5), device, dtype)
        G = make_tensor((5, 4, 6), device, dtype)
        H = make_tensor((4, 4), device, dtype)
        I = make_tensor((2, 3, 2), device, dtype)
        # Vector operations
        self._check_einsum('i->', x)                     # sum
        self._check_einsum('i,i->', x, x)                # dot
        self._check_einsum('i,i->i', x, x)               # vector element-wise mul
        self._check_einsum('i,j->ij', x, y)              # outer
        # Matrix operations
        self._check_einsum("ij->ji", A)                  # transpose
        self._check_einsum("ij->j", A)                   # row sum
        self._check_einsum("ij->i", A)                   # col sum
        self._check_einsum("ij,ij->ij", A, A)            # matrix element-wise mul
        self._check_einsum("ij,j->i", A, x)              # matrix vector multiplication
        self._check_einsum("ij,kj->ik", A, B)            # matmul
        self._check_einsum("ij,ab->ijab", A, E)          # matrix outer product
        # Tensor operations
        self._check_einsum("Aij,Ajk->Aik", C, D)         # batch matmul
        self._check_einsum("ijk,jk->i", C, A)            # tensor matrix contraction
        self._check_einsum("aij,jk->aik", D, E)          # tensor matrix contraction
        self._check_einsum("abCd,dFg->abCFg", F, G)      # tensor tensor contraction
        self._check_einsum("ijk,jk->ik", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,jk->ij", C, A)           # tensor matrix contraction with double indices
        self._check_einsum("ijk,ik->j", C, B)            # non contiguous
        self._check_einsum("ijk,ik->jk", C, B)           # non contiguous with double indices
        # Test diagonals
        self._check_einsum("ii", H)                      # trace
        self._check_einsum("ii->i", H)                   # diagonal
        self._check_einsum('iji->j', I)                  # non-contiguous trace
        self._check_einsum('ngrg...->nrg...', make_tensor((2, 1, 3, 1, 4), device, dtype))
        # Test ellipsis
        self._check_einsum("i...->...", H)
        self._check_einsum("ki,...k->i...", A.t(), B)
        self._check_einsum("k...,jk->...", A.t(), B)
        self._check_einsum('...ik, ...j -> ...ij', C, x)
        self._check_einsum('Bik,k...j->i...j', C, make_tensor((5, 3), device, dtype))
        self._check_einsum('i...j, ij... -> ...ij', C, make_tensor((2, 5, 2, 3), device, dtype))
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum("bn,anm,bm->ba", l, w, r)
        # with strided tensors
        self._check_einsum("bn,Anm,bm->bA", l[:, ::2], w[:, ::2, ::2], r[:, ::2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_sublist_format(self, device, dtype):
        """torch.einsum's sublist calling convention (operand, subscript
        list, ...) including Ellipsis and explicit output sublists."""
        x = make_tensor((5,), device, dtype)
        y = make_tensor((7,), device, dtype)
        A = make_tensor((3, 5), device, dtype)
        B = make_tensor((2, 5), device, dtype)
        C = make_tensor((2, 1, 3, 1, 4), device, dtype)
        self._check_einsum(x, [0])
        self._check_einsum(x, [0], [])
        self._check_einsum(x, [0], y, [1], [0, 1])
        self._check_einsum(A, [0, 1], [1, 0])
        self._check_einsum(A, [0, 1], x, [1], [0])
        self._check_einsum(A, [0, 1], B, [2, 1])
        self._check_einsum(A, [0, 1], B, [2, 1], [0, 2])
        # repeated labels select diagonals; Ellipsis covers unlabeled dims
        self._check_einsum(C, [0, 1, 2, 1, Ellipsis], [0, 2, 1, Ellipsis])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0])
        self._check_einsum(A.t(), [0, 1], B, [Ellipsis, 0], [1, Ellipsis])
        self._check_einsum(A.t(), [0, Ellipsis], B, [1, 0], [Ellipsis])
        # torch.bilinear with noncontiguous tensors
        l = make_tensor((5, 10), device, dtype, noncontiguous=True)
        r = make_tensor((5, 20), device, dtype, noncontiguous=True)
        w = make_tensor((15, 10, 20), device, dtype)
        self._check_einsum(l, [40, 41], w, [2, 41, 50], r, [40, 50], [40, 2])
    @dtypes(torch.double, torch.cdouble)
    def test_einsum_random(self, device, dtype):
        """Randomized einsum fuzzing: generate random operand shapes,
        subscripts, diagonals, ellipses, and broadcasting patterns, then
        compare both the equation and sublist formats against NumPy."""
        def convert_label(label):
            # labels 0-25 -> 'A'-'Z', 26-51 -> 'a'-'z', Ellipsis -> '...'
            if label == ...:
                return '...'
            elif label < 26:
                return chr(ord('A') + label)
            else:
                return chr(ord('a') + label - 26)
        def convert_sublist(sublist):
            return ''.join(convert_label(label) for label in sublist)
        def test(n=10,                  # how many tests to generate
                 n_labels=5,            # how many labels available
                 min_ops=1, max_ops=3,  # min and max number of operands per test
                 min_dims=1, max_dims=3,  # min and max number of dimensions per operand
                 min_size=1, max_size=8,  # min and max size of each dimension
                 max_out_dim=3,         # max number of dimensions for the output
                 enable_diagonals=True,  # controls if labels can be repeated for diagonals
                 ellipsis_prob=0.5,     # probability of including ellipsis in operand
                 broadcasting_prob=0.1):  # probability of turning some dim sizes 1 for broadcasting
            all_labels = torch.arange(52)
            assert 0 <= n
            assert 0 <= n_labels < len(all_labels)
            assert 0 < min_ops <= max_ops
            assert 0 <= min_dims <= max_dims
            assert 0 <= min_size <= max_size
            assert 0 <= max_out_dim
            assert enable_diagonals or max_dims <= n_labels
            for _ in range(n):
                # Select a subset of labels for this test and give them random sizes
                possible_labels = all_labels[torch.randperm(len(all_labels))[:n_labels]]
                labels_size = torch.randint_like(all_labels, min_size, max_size + 1)
                ellipsis_shape = torch.randint(min_size, max_size + 1, (max_dims - min_dims,))
                operands = []
                sublists = []
                ell_size = 0
                valid_labels = set()
                # create random input operands
                for _ in range(random.randint(min_ops, max_ops)):
                    n_dim = random.randint(min_dims, max_dims)
                    # with replacement=enable_diagonals, repeats yield diagonals
                    labels_idx = torch.ones(len(possible_labels)).multinomial(n_dim, enable_diagonals)
                    labels = possible_labels[labels_idx]
                    valid_labels.update(labels.tolist())
                    shape = labels_size[labels]
                    # turn some dimensions to size 1 for testing broadcasting
                    mask = Binomial(probs=broadcasting_prob).sample((n_dim,))
                    broadcast_labels = torch.unique(labels[mask == 1])
                    shape[(labels[..., None] == broadcast_labels).any(-1)] = 1
                    labels = labels.tolist()
                    shape = shape.tolist()
                    # include ellipsis if not all dimensions were assigned a label already
                    if n_dim < max_dims and torch.rand(1) < ellipsis_prob:
                        ell_num_dim = random.randint(1, max_dims - n_dim)
                        ell_size = max(ell_size, ell_num_dim)
                        ell_shape = ellipsis_shape[-ell_num_dim:]
                        # again, turn some dimensions to size 1 for broadcasting
                        mask = Binomial(probs=broadcasting_prob).sample((ell_num_dim,))
                        ell_shape[mask == 1] = 1
                        ell_index = random.randint(0, n_dim)
                        shape[ell_index:ell_index] = ell_shape
                        labels.insert(ell_index, ...)
                    operands.append(make_tensor(shape, device, dtype))
                    sublists.append(labels)
                # NumPy has a bug with the sublist format so for now we compare PyTorch sublist
                # implementation against the equation format implementation of NumPy
                # see https://github.com/numpy/numpy/issues/10926
                np_operands = [op.cpu().numpy() for op in operands]
                # test equation format
                equation = ','.join(convert_sublist(l) for l in sublists)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format
                args = [*itertools.chain(*zip(operands, sublists))]
                self._check_einsum(*args, np_args=(equation, *np_operands))
                # generate an explicit output
                out_sublist = []
                num_out_labels = max(0, random.randint(0, min(max_out_dim, len(valid_labels))) - ell_size)
                if num_out_labels > 0:
                    out_labels_idx = torch.ones(len(valid_labels)).multinomial(num_out_labels)
                    out_sublist = torch.tensor(list(valid_labels))[out_labels_idx].tolist()
                out_sublist.insert(random.randint(0, num_out_labels), ...)
                # test equation format with explicit output
                equation += '->' + convert_sublist(out_sublist)
                self._check_einsum(equation, *operands, np_args=(equation, *np_operands))
                # test sublist format with explicit output
                args.append(out_sublist)
                self._check_einsum(*args, np_args=(equation, *np_operands))
        test(100)
    def test_einsum_corner_cases(self, device):
        """einsum edge cases: whitespace-only equations, scalar inputs,
        zero-sized dimensions, broadcasting, and ellipsis handling."""
        def check(equation, *operands, expected_output):
            tensors = [torch.tensor(operand, device=device, dtype=torch.float32) if not isinstance(operand, tuple)
                       else make_tensor(operand, device, torch.float32) for operand in operands]
            output = torch.einsum(equation, tensors)
            self.assertEqual(output, torch.tensor(expected_output, dtype=torch.float32, device=device))
        # Test equation variations
        check(' ', 1, expected_output=1)
        check(' -> ', 1, expected_output=1)
        check(' , ', 2, 2, expected_output=4)
        check(' , , ', 2, 2, 2, expected_output=8)
        check(' , -> ', 2, 2, expected_output=4)
        check(' i ', [1], expected_output=[1])
        check(' i -> ', [1], expected_output=1)
        check(' i -> i ', [1], expected_output=[1])
        check(' i , i ', [2], [2], expected_output=4)
        check(' i , i -> i ', [2], [2], expected_output=[4])
        # Test tensors with 0 size dimensions
        check('i', [], expected_output=[])
        check(' i j -> j', [[], []], expected_output=[])
        check('ij->i', [[], []], expected_output=[0., 0.])
        check(' i j k  ,  k  -> i j ', (3, 0, 6), (6,), expected_output=[[], [], []])
        # Test broadcasting
        check('i,j', [2], [1, 2], expected_output=[[2, 4]])
        check('i,ij->ij', [1, 2], [[1, 2, 3], [2, 3, 4]], expected_output=[[1, 2, 3], [4, 6, 8]])
        # Test ellipsis broadcasting
        check('...', 1, expected_output=1)
        check('...->', 1, expected_output=1)
        check('...->...', 1, expected_output=1)
        check('...', [1], expected_output=[1])
        check('...->', [1], expected_output=1)
        check('z...->z', [1], expected_output=[1])
        check('Z...->...Z', [1], expected_output=[1])
        check('...a->', [[2], [4]], expected_output=6)
        check('a...b->ab', [[[1], [2]], [[3], [4]]], expected_output=[[3], [7]])
def test_einsum_error_cases(self, device):
    """Verify that torch.einsum rejects malformed equations/operands with the
    expected error messages, in both equation and sublist formats."""
    def check(*args, regex, exception=RuntimeError):
        # All einsum errors are prefixed with 'einsum():'.
        with self.assertRaisesRegex(exception, r'einsum\(\):.*' + regex):
            torch.einsum(*args)

    x = make_tensor((2,), device, torch.float32)
    y = make_tensor((2, 3), device, torch.float32)

    # Equation-format errors
    check('', [], regex=r'at least one operand', exception=ValueError)
    check('. ..', [x], regex=r'found \'.\' for operand 0 that is not part of any ellipsis')
    check('... ...', [x], regex=r'found \'.\' for operand 0 for which an ellipsis was already found')
    check('1', [x], regex=r'invalid subscript given at index 0')
    check(',', [x], regex=r'fewer operands were provided than specified in the equation')
    check('', [x, x], regex=r'more operands were provided than specified in the equation')
    check('', [x], regex=r'the number of subscripts in the equation \(0\) does not match the number '
          r'of dimensions \(1\) for operand 0 and no ellipsis was given')
    check('ai', [x], regex=r'the number of subscripts in the equation \(2\) does not match the number '
          r'of dimensions \(1\) for operand 0 and no ellipsis was given')
    check('ai...', [x], regex=r'the number of subscripts in the equation \(2\) is more than the number '
          r'of dimensions \(1\) for operand 0')
    check('a->... .', [x], regex=r'found \'.\' for output but an ellipsis \(...\) was already found')
    check('a->..', [x], regex=r'found \'.\' for output that is not part of any ellipsis \(...\)')
    check('a->1', [x], regex=r'invalid subscript given at index 3')
    check('a->aa', [x], regex=r'output subscript a appears more than once in the output')
    check('a->i', [x], regex=r'output subscript i does not appear in the equation for any input operand')
    check('aa', [y], regex=r'subscript a is repeated for operand 0 but the sizes don\'t match, 3 != 2')
    check('a, ba', [x, y], regex=r'operands do not broadcast with remapped shapes \[original->remapped\]: '
          r'\[2\]->\[1, 2\] \[2, 3\]->\[2, 3\]')

    # Sublist-format errors (label must be in [0, 52))
    check(x, [-1], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
    check(x, [52], regex=r'not within the valid range \[0, 52\)', exception=ValueError)
def _gen_shape_inputs_linalg_triangular_solve(self, shape, dtype, device, well_conditioned=False):
    """Yield (A, B, left, upper, uni) test inputs for linalg.solve_triangular.

    Iterates over all combinations of: solving from the left/right, unit
    diagonal, zero-stride ("expanded") batch dims, transposed and conjugated
    A/B. ``shape`` is (batch, n, k). With ``well_conditioned`` A is taken
    from a PLU factorization so it is far from singular.
    """
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    make_randn = partial(torch.randn, dtype=dtype, device=device)
    b, n, k = shape
    for left, uni, expand_a, tr_a, conj_a, expand_b, tr_b, conj_b in product((True, False), repeat=8):
        # expand means that we generate a batch of matrices with a stride of zero in the batch dimension
        if (conj_a or conj_b) and not dtype.is_complex:
            continue
        # We just expand on the batch size
        if (expand_a or expand_b) and b == 1:
            continue

        # A multiplies from the left (n x n) or from the right (k x k).
        size_a = (b, n, n) if left else (b, k, k)
        size_b = (b, n, k) if not tr_b else (b, k, n)

        # If expand_a or expand_b, we'll expand them to the correct size later
        if b == 1 or expand_a:
            size_a = size_a[1:]
        if b == 1 or expand_b:
            size_b = size_b[1:]

        if well_conditioned:
            PLU = torch.lu_unpack(*torch.lu(make_randn(*size_a)))
            if uni:
                # A = L from PLU
                A = PLU[1].transpose(-2, -1).contiguous()
            else:
                # A = U from PLU
                A = PLU[2].contiguous()
        else:
            A = make_arg(size_a)
            A.triu_()
            diag = A.diagonal(0, -2, -1)
            if uni:
                diag.fill_(1.)
            else:
                # keep A invertible: nudge near-zero diagonal entries to 1
                diag[diag.abs() < 1e-6] = 1.
        B = make_arg(size_b)

        if tr_a:
            A.transpose_(-2, -1)
        if tr_b:
            B.transpose_(-2, -1)
        if conj_a:
            A = A.conj()
        if conj_b:
            B = B.conj()
        if expand_a:
            A = A.expand(b, *size_a)
        if expand_b:
            B = B.expand(b, n, k)
        # A is built upper-triangular, so after the optional transpose
        # upper == (not tr_a).
        yield A, B, left, not tr_a, uni
def _test_linalg_solve_triangular(self, A, B, upper, left, uni):
    """Check that solve_triangular solves A X = B (left) or X A = B (right),
    and that the out= variant matches the functional result."""
    X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
    if left:
        self.assertEqual(A @ X, B)
    else:
        self.assertEqual(X @ A, B)
    # Writing into B itself also exercises out= aliasing the input.
    out = B
    # B may be expanded
    if not B.is_contiguous() and not B.transpose(-2, -1).is_contiguous():
        out = B.clone()
    torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni, out=out)
    self.assertEqual(X, out)
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-1, torch.complex64: 1e-1,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular(self, device, dtype):
    """Smoke-test torch.linalg.solve_triangular over a grid of small batch,
    matrix and right-hand-side sizes (API + BLAS CPU + batched cuBLAS)."""
    batch_sizes = (1, 2, 0)
    mat_sizes = (5, 0)
    rhs_sizes = (3, 1, 0)
    for shape in product(batch_sizes, mat_sizes, rhs_sizes):
        for A, B, left, upper, uni in self._gen_shape_inputs_linalg_triangular_solve(shape, dtype, device):
            self._test_linalg_solve_triangular(A, B, upper, left, uni)
@onlyCUDA
@skipCUDAIfNoMagma  # Magma needed for the PLU decomposition
@skipCUDAIfRocm  # There is a memory access bug in rocBLAS in the (non-batched) solve_triangular
@skipCUDAVersionIn([(11, 3), (11, 5)])  # Tracked in https://github.com/pytorch/pytorch/issues/70111
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular_large(self, device, dtype):
    """Run solve_triangular on large, well-conditioned inputs."""
    # Exercises magma and cublas
    magma = (9, 513, 1)
    iterative_cublas = (2, 64, 1)
    gen_inputs = self._gen_shape_inputs_linalg_triangular_solve
    for shape in (magma, iterative_cublas):
        for A, B, left, upper, uni in gen_inputs(shape, dtype, device, well_conditioned=True):
            self._test_linalg_solve_triangular(A, B, upper, left, uni)
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-2, torch.complex64: 1e-2,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_linalg_solve_triangular_broadcasting(self, device, dtype):
    """solve_triangular must broadcast the batch dimensions of A and B."""
    make_arg = partial(make_tensor, dtype=dtype, device=device)
    # (A size, B size) pairs with broadcastable batch dimensions
    sizes = (((2, 1, 3, 4, 4), (2, 1, 3, 4, 6)),
             ((2, 1, 3, 4, 4), (4, 6)),
             ((4, 4), (2, 1, 3, 4, 2)),
             ((1, 3, 1, 4, 4), (2, 1, 3, 4, 5)))
    for size_A, size_B in sizes:
        for left, upper, uni in itertools.product([True, False], repeat=3):
            A = make_arg(size_A)
            if upper:
                A.triu_()
            else:
                A.tril_()
            diag = A.diagonal(0, -2, -1)
            if uni:
                diag.fill_(1.)
            else:
                # keep A invertible: nudge near-zero diagonal entries to 1
                diag[diag.abs() < 1e-6] = 1.
            B = make_arg(size_B)
            if not left:
                B.transpose_(-2, -1)

            X = torch.linalg.solve_triangular(A, B, upper=upper, left=left, unitriangular=uni)
            if left:
                B_other = A @ X
            else:
                B_other = X @ A
            # X must satisfy the system up to broadcasting of B
            self.assertEqual(*torch.broadcast_tensors(B, B_other))
def triangular_solve_test_helper(self, A_dims, b_dims, upper, unitriangular,
                                 device, dtype):
    """Build a random right-hand side ``b`` and a triangular matrix ``A``.

    ``A`` is the upper (or lower) triangle of a random positive definite
    matrix; when ``unitriangular`` its diagonal is overwritten with ones.
    Returns ``(b, A)``.
    """
    b = torch.randn(*b_dims, dtype=dtype, device=device)
    full = torch.randn(*A_dims, dtype=dtype, device=device)
    # symmetrize into a positive definite matrix before triangulating
    full = torch.matmul(full, full.mT)
    A = torch.triu(full) if upper else torch.tril(full)
    if unitriangular:
        A.diagonal(dim1=-2, dim2=-1).fill_(1.)
    return b, A
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve(self, device, dtype):
    """Check torch.triangular_solve by reconstructing b with a NumPy matmul,
    over all upper/unitriangular/transpose combinations (including empty
    matrix/rhs sizes)."""
    ks = [0, 1, 3]
    ns = [0, 5]
    for k, n, (upper, unitriangular, transpose) in itertools.product(ks, ns,
                                                                     itertools.product([True, False], repeat=3)):
        b, A = self.triangular_solve_test_helper((n, n), (n, k), upper,
                                                 unitriangular, device, dtype)
        x = torch.triangular_solve(b, A, upper=upper, unitriangular=unitriangular, transpose=transpose)[0]
        if transpose:
            # transpose=True solves A^T x = b
            self.assertEqual(b, np.matmul(A.t().cpu(), x.cpu()))
        else:
            self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve_batched(self, device, dtype):
    """Batched triangular_solve must match a loop of per-matrix solves, and
    must handle empty matrices and zero-size batches."""
    def triangular_solve_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
        # Compare the batched kernel against stacking per-sample solves.
        b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                 unitriangular, device, dtype)
        x_exp_list = []
        for i in range(b_dims[0]):
            x_exp_list.append(torch.triangular_solve(b[i], A[i], upper=upper,
                                                     unitriangular=unitriangular,
                                                     transpose=transpose)[0])
        x_exp = torch.stack(x_exp_list)  # Stacked output
        x_act = torch.triangular_solve(b, A, upper=upper,
                                       unitriangular=unitriangular,
                                       transpose=transpose)[0]  # Actual output
        self.assertEqual(x_act, x_exp)  # Equality check
        if transpose:
            A = A.mT

        # Also verify the solution reconstructs b.
        Ax = np.matmul(A.cpu(), x_act.cpu())
        self.assertEqual(b, Ax)

    def triangular_solve_zero_batch_helper(A_dims, b_dims, upper, unitriangular, transpose):
        # A zero-size batch must produce an output shaped like b.
        b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                 unitriangular, device, dtype)
        x = torch.triangular_solve(b, A, upper=upper,
                                   unitriangular=unitriangular,
                                   transpose=transpose)[0]
        self.assertTrue(x.shape == b.shape)

    for upper, unitriangular, transpose in itertools.product([True, False], repeat=3):
        batchsize = 3
        triangular_solve_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                      upper, unitriangular, transpose)

        # test empty input
        triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 10),
                                      upper, unitriangular, transpose)
        triangular_solve_batch_helper((batchsize, 0, 0), (batchsize, 0, 0),
                                      upper, unitriangular, transpose)

        # test zero batch case
        batchsize = 0
        triangular_solve_zero_batch_helper((batchsize, 5, 5), (batchsize, 5, 10),
                                           upper, unitriangular, transpose)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_triangular_solve_batched_many_batches(self, device, dtype):
    """Stress triangular_solve with very large batch counts, both when A is
    batched (b broadcast) and when b is batched (A broadcast)."""
    for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
        # test batched A case
        b, A = self.triangular_solve_test_helper((256, 256, 5, 5), (5, 1),
                                                 upper, unitriangular, device, dtype)
        x, _ = torch.triangular_solve(b, A,
                                      upper=upper, transpose=transpose, unitriangular=unitriangular)
        if transpose:
            A = A.mT

        Ax = torch.matmul(A, x)

        # looser rtol for single precision: error accumulates over many batches
        rtol = 1e-2 if dtype in [torch.float32, torch.complex64] else self.precision
        self.assertEqual(Ax, b.expand_as(Ax), atol=self.precision, rtol=rtol)

        # test batched b case
        b, A = self.triangular_solve_test_helper((3, 3), (512, 512, 3, 1),
                                                 upper, unitriangular, device, dtype)
        x, _ = torch.triangular_solve(b, A, upper=upper, transpose=transpose,
                                      unitriangular=unitriangular)
        if transpose:
            A = A.mT

        self.assertEqual(torch.matmul(A, x), b)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@unittest.skipIf(not TEST_SCIPY, "SciPy not found")
@dtypes(*floating_and_complex_types())
def test_triangular_solve_batched_broadcasting(self, device, dtype):
    """Compare batch-broadcasting triangular_solve against SciPy's
    solve_triangular applied pairwise to the broadcast matrices."""
    from scipy.linalg import solve_triangular as tri_solve

    def scipy_tri_solve_batched(A, B, upper, trans, diag):
        # Broadcast the batch dims manually, solve each matrix pair with
        # SciPy, then reshape back to the broadcast batch shape.
        batch_dims_A, batch_dims_B = A.shape[:-2], B.shape[:-2]
        single_dim_A, single_dim_B = A.shape[-2:], B.shape[-2:]
        expand_dims = tuple(torch._C._infer_size(torch.Size(batch_dims_A),
                                                 torch.Size(batch_dims_B)))
        expand_A = np.broadcast_to(A, expand_dims + single_dim_A)
        expand_B = np.broadcast_to(B, expand_dims + single_dim_B)
        flat_A = expand_A.reshape((-1,) + single_dim_A)
        flat_B = expand_B.reshape((-1,) + single_dim_B)
        flat_X = np.vstack([tri_solve(a, b, lower=(not upper), trans=int(trans), unit_diagonal=diag)
                            for a, b in zip(flat_A, flat_B)])
        return flat_X.reshape(expand_B.shape)

    def run_test(A_dims, b_dims, device, upper, transpose, unitriangular):
        b, A = self.triangular_solve_test_helper(A_dims, b_dims, upper,
                                                 unitriangular, device, dtype)
        x_exp = torch.as_tensor(scipy_tri_solve_batched(A.cpu().numpy(), b.cpu().numpy(),
                                                        upper, transpose, unitriangular))
        x = torch.triangular_solve(b, A, upper=upper, transpose=transpose, unitriangular=unitriangular)[0]

        self.assertEqual(x, x_exp.to(device))

    for upper, transpose, unitriangular in itertools.product([True, False], repeat=3):
        # test against scipy.linalg.solve_triangular
        run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6), device, upper, transpose, unitriangular)  # no broadcasting
        run_test((2, 1, 3, 4, 4), (4, 6), device, upper, transpose, unitriangular)  # broadcasting b
        run_test((4, 4), (2, 1, 3, 4, 2), device, upper, transpose, unitriangular)  # broadcasting A
        run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5), device, upper, transpose, unitriangular)  # broadcasting A & b
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_triangular_solve_out_errors_and_warnings(self, device, dtype):
    """out= variants of triangular_solve must validate dtype and device of
    the out tensors, and warn when a non-empty out tensor is resized."""
    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty_like(b).to(torch.int)
    clone_a = torch.empty_like(a)
    with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
        torch.triangular_solve(b, a, out=(out, clone_a))

    out = torch.empty_like(b)
    clone_a = clone_a.to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "Expected out tensor to have dtype"):
        torch.triangular_solve(b, a, out=(out, clone_a))

    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        clone_a = torch.empty_like(a)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.triangular_solve(b, a, out=(out, clone_a))
        out = torch.empty(0, dtype=dtype, device=device)
        clone_a = torch.empty_like(a).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.triangular_solve(b, a, out=(out, clone_a))

    # Trigger the WARN_ONCE deprecation error
    torch.triangular_solve(b, a)

    # if out tensor with wrong shape is passed a warning is given
    with warnings.catch_warnings(record=True) as w:
        out = torch.empty(1, dtype=dtype, device=device)
        clone_a = torch.empty(1, dtype=dtype, device=device)
        # Trigger warning
        torch.triangular_solve(b, a, out=(out, clone_a))
        # Check warning occurs: one resize warning per out tensor
        self.assertEqual(len(w), 2)
        self.assertTrue("An output with one or more elements was resized" in str(w[0].message))
        self.assertTrue("An output with one or more elements was resized" in str(w[1].message))
def check_single_matmul(self, x, y, shape):
    """Check torch.matmul(x, y) against np.matmul, for both the functional
    form and the out= form.

    ``shape`` is the expected result shape, used to allocate ``out``.
    """
    # np.asarray replaces np.array(..., copy=False), which raises under
    # NumPy >= 2.0 whenever a copy cannot be avoided.
    a = np.asarray(x)
    b = np.asarray(y)
    expected = np.matmul(a, b)

    ans = torch.matmul(x, y)
    self.assertTrue(ans.is_contiguous())
    self.assertTrue(np.array_equal(ans, expected))

    # Allocate out directly on x's device instead of allocating on the
    # default device and copying over.
    out = torch.zeros(*shape, dtype=torch.int64, device=x.device)
    ans = torch.matmul(x, y, out=out)
    self.assertIs(ans, out)
    self.assertTrue(ans.is_contiguous())
    self.assertTrue(np.array_equal(ans, expected))
# TODO: update to run on CUDA, too
@onlyCPU
def test_matmul_small_brute_force_1d_Nd(self, device):
    """Brute-force matmul of a 1-d vector against 3-d/4-d batched matrices,
    covering C-contiguous, Fortran-contiguous and non-contiguous layouts,
    checked against NumPy via check_single_matmul."""
    # Issue #20452: range(0, 10) does not work.
    n = 1
    for m in range(1, 8):
        for p in range(1, 8):
            for o in range(1, 5):
                # 1d, 3d, inner dimensions C
                x = torch.arange(m, device=device)
                y = torch.arange(o * m * p, device=device).reshape(o, m, p)
                self.check_single_matmul(x, y, (o, n, p))

                # 1d, 3d, inner dimensions Fortran
                x = torch.arange(m, device=device)
                y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
                self.check_single_matmul(x, y, (o, n, p))

                # 1d, 3d, inner dimensions non-contiguous
                x = torch.arange(2 * m, device=device)[::2]
                y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
                self.check_single_matmul(x, y, (o, n, p))

                for r in range(1, 5):
                    # 1d, 4d, inner dimensions C
                    # (fix: these two x tensors previously omitted
                    # device=device, unlike every other operand)
                    x = torch.arange(m, device=device)
                    y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
                    self.check_single_matmul(x, y, (r, o, n, p))

                    # 1d, 4d, inner dimensions Fortran
                    x = torch.arange(m, device=device)
                    y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
                    self.check_single_matmul(x, y, (r, o, n, p))

                    # 1d, 4d, inner dimensions non-contiguous
                    x = torch.arange(2 * m, device=device)[::2]
                    y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
                    self.check_single_matmul(x, y, (r, o, n, p))
# TODO: update to run on CUDA, too
@onlyCPU
def test_matmul_small_brute_force_2d_Nd(self, device):
    """Brute-force matmul of a 2-d matrix against 3-d/4-d batched matrices,
    covering C-contiguous, Fortran-contiguous and non-contiguous layouts,
    checked against NumPy via check_single_matmul."""
    # Issue #20452: range(0, 10) does not work.
    for n in range(1, 5):
        for m in range(1, 5):
            for p in range(1, 5):
                for o in range(1, 3):
                    # 2d, 3d, inner dimensions C
                    x = torch.arange(n * m, device=device).reshape(n, m)
                    y = torch.arange(o * m * p, device=device).reshape(o, m, p)
                    self.check_single_matmul(x, y, (o, n, p))

                    # 2d, 3d, inner dimensions Fortran
                    x = torch.arange(m * n, device=device).reshape(m, n).mT
                    y = torch.arange(o * p * m, device=device).reshape(o, p, m).mT
                    self.check_single_matmul(x, y, (o, n, p))

                    # 2d, 3d, inner dimensions non-contiguous
                    x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                    y = torch.arange(o * m * 2 * p, device=device).reshape(o, m, 2 * p)[:, :, ::2]
                    self.check_single_matmul(x, y, (o, n, p))

                    for r in range(1, 2):
                        # 2d, 4d, inner dimensions C
                        x = torch.arange(n * m, device=device).reshape(n, m)
                        y = torch.arange(r * o * m * p, device=device).reshape(r, o, m, p)
                        self.check_single_matmul(x, y, (r, o, n, p))

                        # 2d, 4d, inner dimensions Fortran
                        x = torch.arange(m * n, device=device).reshape(m, n).mT
                        y = torch.arange(r * o * p * m, device=device).reshape(r, o, p, m).mT
                        self.check_single_matmul(x, y, (r, o, n, p))

                        # 2d, 4d, inner dimensions non-contiguous
                        x = torch.arange(n * 2 * m, device=device).reshape(n, 2 * m)[:, ::2]
                        y = torch.arange(r * o * m * 2 * p, device=device).reshape(r, o, m, 2 * p)[:, :, :, ::2]
                        self.check_single_matmul(x, y, (r, o, n, p))
def test_linear_algebra_scalar_raises(self, device) -> None:
m = torch.randn(5, 5, device=device)
v = torch.randn(5, device=device)
s = torch.tensor(7, device=device)
self.assertRaises(RuntimeError, lambda: torch.mv(m, s))
self.assertRaises(RuntimeError, lambda: torch.addmv(v, m, s))
@dtypes(torch.float32, torch.complex64)
def test_cross(self, device, dtype):
    """torch.cross with an out= tensor must match the functional result."""
    lhs = torch.rand(100, 3, 100, dtype=dtype, device=device)
    rhs = torch.rand(100, 3, 100, dtype=dtype, device=device)
    expected = torch.cross(lhs, rhs)
    out = torch.tensor((), dtype=dtype, device=device)
    torch.cross(lhs, rhs, out=out)
    self.assertEqual(expected, out)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross(self, device, dtype):
    """torch.linalg.cross: out= variant, broadcasting, and a variety of
    non-contiguous input layouts, checked against np.cross."""
    x = torch.rand(100, 3, 100, dtype=dtype, device=device)
    y = torch.rand(100, 3, 100, dtype=dtype, device=device)
    res1 = torch.linalg.cross(x, y, dim=1)
    res2 = torch.tensor((), dtype=dtype, device=device)
    torch.linalg.cross(x, y, dim=1, out=res2)
    self.assertEqual(res1, res2)

    # test for broadcastable inputs
    x = torch.rand(1, 3, 2, dtype=dtype, device=device)
    y = torch.rand(4, 3, 1, dtype=dtype, device=device)
    res1 = torch.linalg.cross(x, y, dim=1)
    res2 = torch.tensor((), dtype=dtype, device=device)
    torch.linalg.cross(x, y, dim=1, out=res2)
    self.assertEqual(res1, res2)

    # non contiguous case 1
    x = torch.rand((4, 4, 4, 3), dtype=dtype,
                   device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
    y = torch.rand((4, 4, 4, 3), dtype=dtype,
                   device=device).contiguous(memory_format=torch.channels_last)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=-1)
    res = torch.linalg.cross(x, y, dim=-1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 2
    x = torch.rand(1, 3, 2, dtype=dtype, device=device)  # contiguous
    y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 3
    x = torch.rand(2, 3, 1, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    y = torch.rand(1, 3, 4, dtype=dtype, device=device).permute(2, 1, 0)  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 4
    x = torch.randn(12, 3, device=device, dtype=dtype)[::2, :]  # non-contiguous
    y = torch.randn(18, 3, device=device, dtype=dtype)[::3, :]  # non-contiguous
    np_expected_ref = np.cross(x.cpu().numpy(), y.cpu().numpy(), axis=1)
    res = torch.linalg.cross(x, y, dim=1)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)

    # non contiguous case 5: 1-d inputs, x broadcast to y's length
    x = torch.randn(1, device=device, dtype=dtype)  # contiguous
    y = torch.randn(6, device=device, dtype=dtype)[::2]  # non-contiguous
    np_expected_ref = np.cross(x.expand(3).cpu().numpy(), y.cpu().numpy())
    res = torch.linalg.cross(x, y)
    # numpy reference compared to torch result
    self.assertEqual(res.cpu().numpy(), np_expected_ref)
@dtypes(torch.float32, torch.complex64)
def test_cross_with_and_without_dim(self, device, dtype):
    """dim=1, dim=-1 and the implicit default must agree for (N, 3) inputs."""
    a = torch.rand(100, 3, dtype=dtype, device=device)
    b = torch.rand(100, 3, dtype=dtype, device=device)
    explicit = torch.cross(a, b, dim=1)
    negative = torch.cross(a, b, dim=-1)
    implicit = torch.cross(a, b)
    self.assertEqual(explicit, negative)
    self.assertEqual(explicit, implicit)
@dtypes(torch.float32, torch.complex64)
def test_linalg_cross_with_and_without_dim(self, device, dtype):
    """dim=1, dim=-1 and the implicit default must agree for (N, 3) inputs."""
    a = torch.rand(100, 3, dtype=dtype, device=device)
    b = torch.rand(100, 3, dtype=dtype, device=device)
    explicit = torch.linalg.cross(a, b, dim=1)
    negative = torch.linalg.cross(a, b, dim=-1)
    implicit = torch.linalg.cross(a, b)
    self.assertEqual(explicit, negative)
    self.assertEqual(explicit, implicit)
def test_cross_errors(self, device):
    """torch.cross must reject mismatched shapes, inputs with no size-3
    dimension, and out-of-range dim arguments."""
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "no dimension of size 3 in input",
        lambda: torch.cross(torch.rand(5, 4, device=device), torch.rand(5, 4, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "dimension 0 does not have size 3",
        lambda: torch.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
    self.assertRaisesRegex(
        IndexError, "Dimension out of range",
        lambda: torch.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_linalg_cross_errors(self, device):
    """torch.linalg.cross must reject mismatched shapes, non-size-3 dims
    (note: its default dim is -1, unlike torch.cross), and bad dim values."""
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.linalg.cross(torch.rand(100, 3, device=device), torch.rand(100, 3, 10, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "must match the size of tensor",
        lambda: torch.linalg.cross(torch.rand(5, 3, device=device), torch.rand(3, 5, device=device)))
    self.assertRaisesRegex(
        RuntimeError, "dimension 0 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 4, 3, device=device), torch.rand(5, 4, 3, device=device), dim=0))
    self.assertRaisesRegex(
        RuntimeError, "dimension -1 does not have size 3",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-1))
    self.assertRaisesRegex(
        IndexError, "Dimension out of range",
        lambda: torch.linalg.cross(torch.rand(5, 3, 4, device=device), torch.rand(5, 3, 4, device=device), dim=-5))
def test_renorm(self, device):
m1 = torch.randn(20, 20, device=device) # big enough to exercise vectorized path
res1 = torch.tensor((), device=device)
def renorm(matrix, value, dim, max_norm):
m1 = matrix.transpose(dim, 0).contiguous()
# collapse non-dim dimensions.
m2 = m1.clone().resize_(m1.size(0), int(math.floor(m1.nelement() / m1.size(0))))
norms = m2.norm(value, 1, True)
# clip
new_norms = norms.clone()
new_norms[torch.gt(norms, max_norm)] = max_norm
new_norms.div_(norms.add_(1e-7))
# renormalize
m1.mul_(new_norms.expand_as(m1))
return m1.transpose(dim, 0)
# note that the axis fed to torch.renorm is different (2~=1)
maxnorm = m1.norm(2, 1).mean()
m2 = renorm(m1, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
self.assertEqual(m1, m2, atol=1e-5, rtol=0)
self.assertEqual(m1.norm(2, 0), m2.norm(2, 0), atol=1e-5, rtol=0)
m1 = torch.randn(3, 4, 5, device=device)
m2 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
maxnorm = m2.norm(2, 0).mean()
m2 = renorm(m2, 2, 1, maxnorm)
m1.renorm_(2, 1, maxnorm)
m3 = m1.transpose(1, 2).contiguous().clone().resize_(15, 4)
self.assertEqual(m3, m2)
self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr(self, device, dtype):
    """Multiply by Q (implicitly, from geqrf's Householder reflectors) via
    torch.ormqr and compare against the explicit Q from torch.linalg.qr."""
    def run_test(batch, m, n, fortran_contiguous):
        A = make_tensor((*batch, m, n), dtype=dtype, device=device)
        reflectors, tau = torch.geqrf(A)
        if not fortran_contiguous:
            # geqrf returns column-major (Fortran) data; force row-major
            self.assertTrue(reflectors.mT.is_contiguous())
            reflectors = reflectors.contiguous()

        # Q is of size m x m
        Q, _ = torch.linalg.qr(A, mode='complete')
        C_right = make_tensor((*batch, m, n), dtype=dtype, device=device)
        C_left = make_tensor((*batch, n, m), dtype=dtype, device=device)

        expected = Q @ C_right
        actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=False)
        self.assertEqual(expected, actual)

        expected = C_left @ Q
        actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=False)
        self.assertEqual(expected, actual)

        # transpose=True applies Q^H
        expected = Q.mH @ C_right
        actual = torch.ormqr(reflectors, tau, C_right, left=True, transpose=True)
        self.assertEqual(expected, actual)

        expected = C_left @ Q.mH
        actual = torch.ormqr(reflectors, tau, C_left, left=False, transpose=True)
        self.assertEqual(expected, actual)

        # if tau is all zeros then the implicit matrix Q is the identity matrix
        # so the actual result should be C_right in this case
        zero_tau = torch.zeros_like(tau)
        actual = torch.ormqr(reflectors, zero_tau, C_right, left=True, transpose=False)
        self.assertEqual(C_right, actual)

    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    for batch, (m, n), fortran_contiguous in product(batches, product(ns, ns), [True, False]):
        run_test(batch, m, n, fortran_contiguous)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_ormqr_errors_and_warnings(self, device, dtype):
    """torch.ormqr must raise informative errors on malformed input shapes."""
    # (input size, tau size, other size, expected error message pattern)
    cases = (
        ((10,), (2,), (2,), r"input must have at least 2 dimensions"),
        ((2, 2), (2,), (2,), r"other must have at least 2 dimensions"),
        ((10, 6), (20,), (10, 6), r"other.shape\[-2\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 6), (5,), (5, 5), r"other.shape\[-2\] must be equal to input.shape\[-2\]"),
        ((1, 2, 2), (2, 2), (1, 2, 2), r"batch dimensions of tau to be equal to input.shape\[:-2\]"),
        ((1, 2, 2), (1, 2), (2, 2, 2), r"batch dimensions of other to be equal to input.shape\[:-2\]"),
    )
    for a_size, tau_size, c_size, error_regex in cases:
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.ormqr(make_tensor(a_size, dtype=dtype, device=device),
                        make_tensor(tau_size, dtype=dtype, device=device),
                        make_tensor(c_size, dtype=dtype, device=device))
def test_blas_empty(self, device):
    """BLAS ops must handle empty (zero-size) inputs: shapes propagate,
    reductions over empty dims give zeros, and add* ops respect beta."""
    def fn(torchfn, *args, test_out=False, **kwargs):
        # Tuple args are shapes to randomize; with test_out=True the result
        # is recomputed into a NaN-filled out tensor of matching shape so
        # stale out contents cannot mask a missing write.
        def call_torch_fn(*args, **kwargs):
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args), **kwargs)
        result = call_torch_fn(*args, **kwargs)
        if not test_out:
            return result
        else:
            out = torch.full_like(result, math.nan)
            out1 = call_torch_fn(*args, **kwargs, out=out)
            return out

    # mm, addmm
    self.assertEqual((0, 0), fn(torch.mm, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.mm, (0, 0), (0, 5)).shape)
    self.assertEqual((5, 0), fn(torch.mm, (5, 0), (0, 0)).shape)
    self.assertEqual((3, 0), fn(torch.mm, (3, 2), (2, 0)).shape)
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6)))
    self.assertEqual(torch.zeros((5, 6), device=device), fn(torch.mm, (5, 0), (0, 6), test_out=True))

    self.assertEqual((0, 0), fn(torch.addmm, (0, 0), (0, 0), (0, 0)).shape)
    self.assertEqual((0, 1), fn(torch.addmm, (1, ), (0, 17), (17, 1)).shape)
    t = torch.randn((5, 6), device=device)
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6)))
    self.assertEqual(t, fn(torch.addmm, t, (5, 0), (0, 6), test_out=True))

    # mv, addmv
    self.assertEqual((0,), fn(torch.mv, (0, 0), (0,)).shape)
    self.assertEqual((0,), fn(torch.mv, (0, 2), (2,)).shape)
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,)))
    self.assertEqual(torch.zeros((3,), device=device), fn(torch.mv, (3, 0), (0,), test_out=True))

    self.assertEqual((0,), fn(torch.addmv, (0,), (0, 0), (0,)).shape)
    t = torch.randn((3,), device=device)
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,)))
    self.assertEqual(t, fn(torch.addmv, t, (3, 0), (0,), test_out=True))

    # bmm, baddbmm
    self.assertEqual((0, 0, 0), fn(torch.bmm, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.bmm, (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.bmm, (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6)))
    self.assertEqual(torch.zeros((3, 5, 6), device=device), fn(torch.bmm, (3, 5, 0), (3, 0, 6), test_out=True))

    self.assertEqual((0, 0, 0), fn(torch.baddbmm, (0, 0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((3, 0, 5), fn(torch.baddbmm, (3, 0, 5), (3, 0, 0), (3, 0, 5)).shape)
    self.assertEqual((0, 5, 6), fn(torch.baddbmm, (0, 5, 6), (0, 5, 0), (0, 0, 6)).shape)
    self.assertEqual((3, 5, 6), fn(torch.baddbmm, (3, 5, 6), (3, 5, 0), (3, 0, 6)).shape)
    c = torch.arange(30, dtype=torch.float32, device=device).reshape(3, 2, 5)
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2))  # Issue #33467
    self.assertEqual(-2 * c, fn(torch.baddbmm, c, (3, 2, 0), (3, 0, 5), beta=-2, test_out=True))  # Issue #33467

    # addbmm
    self.assertEqual((0, 0), fn(torch.addbmm, (0, 0), (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((0, 5), fn(torch.addbmm, (0, 5), (3, 0, 0), (3, 0, 5)).shape)
    t = torch.randn((5, 6), device=device)
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6)))
    self.assertEqual(t, fn(torch.addbmm, t, (0, 5, 0), (0, 0, 6), test_out=True))

    # matmul
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.matmul, (0,), (0,), test_out=True))
    self.assertEqual((0, 0), fn(torch.matmul, (0, 0), (0, 0)).shape)
    self.assertEqual((0, 0, 0), fn(torch.matmul, (0, 0, 0), (0, 0, 0)).shape)
    self.assertEqual((5, 0, 0), fn(torch.matmul, (5, 0, 0), (5, 0, 0)).shape)
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4)))
    self.assertEqual(torch.zeros((5, 3, 4), device=device), fn(torch.matmul, (5, 3, 0), (5, 0, 4), test_out=True))

    # dot
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,)))
    self.assertEqual(torch.tensor(0., device=device), fn(torch.dot, (0,), (0,), test_out=True))

    if torch._C.has_lapack:
        # lu
        A_LU, pivots = fn(torch.lu, (0, 5, 5))
        self.assertEqual([(0, 5, 5), (0, 5)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (0, 0, 0))
        self.assertEqual([(0, 0, 0), (0, 0)], [A_LU.shape, pivots.shape])
        A_LU, pivots = fn(torch.lu, (2, 0, 0))
        self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])
@dtypesIfCUDA(torch.cfloat, torch.cdouble,
              *get_all_fp_dtypes(include_half=not CUDA9, include_bfloat16=(CUDA11OrLater and SM53OrLater)))
@dtypes(*(set(get_all_dtypes()) - {torch.half, torch.bool}))
def test_blas_alpha_beta_empty(self, device, dtype):
    """Check that beta scaling of the input is respected by addmv/addmm when
    the reduction dimension is empty (the matmul contributes nothing, so the
    result must be exactly ``beta * input``)."""
    # This test is disabled on CUDA 9 due to:
    # See: https://github.com/pytorch/pytorch/issues/31006
    if dtype is torch.bfloat16 and self.device_type == 'xla':
        # TODO (@zasdfgbnm): this causes the following error on test
        # TestTorchDeviceTypeXLA.test_blas_alpha_beta_empty_xla_bfloat16:
        #
        # RuntimeError: _th_equal not supported on CPUType for BFloat16
        return
    # ensure beta is respected
    value = 11
    input = torch.full((2,), value, dtype=dtype, device=device)
    mat = torch.ones((2, 0), dtype=dtype, device=device)  # zero reduction dim
    vec = torch.ones((0,), dtype=dtype, device=device)
    out = torch.empty((2,), dtype=dtype, device=device)
    if dtype.is_complex:
        alpha = 6 + 7j
        beta = 3 + 4j
    else:
        alpha = 6
        beta = 3
    # torch.addmv: checked both with and without an explicit out= tensor.
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2,), beta * value, dtype=dtype, device=device),
                     torch.addmv(input=input, mat=mat, vec=vec, alpha=alpha, beta=beta, out=out))
    # torch.addmm
    input = torch.full((2, 3), value, dtype=dtype, device=device)
    mat2 = torch.ones((0, 3), dtype=dtype, device=device)
    out = torch.empty((2, 3), dtype=dtype, device=device)
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta))
    self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                     torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))
@dtypes(*(get_all_complex_dtypes() + get_all_fp_dtypes()))
def test_blas_nan_out(self, device, dtype):
    """mv/mm/bmm must fully overwrite a NaN-filled ``out`` tensor so no stale
    NaNs leak into the result; see [NOTE: cpu_zero]."""
    # These functions should work correctly with NaN filled outputs,
    # but need special handling, see [NOTE: cpu_zero]
    # NOTE(review): `dtype` is unused below — tensors are created in the
    # default float dtype. Confirm whether that is intentional.
    b = 3
    n = 5
    m = 7
    p = 11
    # torch.mv
    nm = torch.randn((m, n), device=device).t()
    _m = torch.randn((), device=device).expand(m)
    _m_out = torch.full((m,), float('nan'), device=device)
    self.assertEqual(torch.mv(nm, _m), torch.mv(nm, _m, out=_m_out))
    self.assertEqual(0, torch.isnan(torch.mv(nm, _m)).sum())
    # torch.mm
    mp = torch.randn((p, m), device=device).t()
    np_out = torch.full((n, p), float('nan'), device=device)
    self.assertEqual(torch.mm(nm, mp), torch.mm(nm, mp, out=np_out))
    # torch.bmm
    bnm = torch.randn((b, m, n), device=device).transpose(1, 2)
    bmp = torch.randn((b, p, m), device=device).transpose(1, 2)
    bnp_out = torch.full((b, n, p), float('nan'), device=device)
    self.assertEqual(torch.bmm(bnm, bmp), torch.bmm(bnm, bmp, out=bnp_out))
@onlyCPU  # not supported by CUBLAS
def test_blas_mv_large_input(self, device):
    """Regression test for torch.mv on a large input: values left over in the
    preallocated ``out`` buffer must not affect the result.

    See https://github.com/pytorch/pytorch/issues/31663 and [NOTE: cpu_zero].
    """
    cols = 3000
    rows = 200
    mat_t = torch.randn((rows, cols), device=device).t()
    vec = torch.randn((), device=device).expand(rows)
    out_buf = torch.full((rows,), 0., device=device)
    self.assertEqual(torch.mv(mat_t, vec), torch.mv(mat_t, vec, out=out_buf))
@onlyCPU
def test_renorm_ps(self, device):
    """Check renorm against its definition for several p-norms, including inf.

    ``renorm(p, dim, maxnorm)`` rescales each slice along ``dim`` whose
    p-norm exceeds ``maxnorm``; ``clamp(min=1)`` in the reference expression
    leaves slices already within the limit untouched.
    """
    # full reduction
    x = torch.randn(5, 5)
    # (removed an unused `xn = x.numpy()` conversion)
    for p in [1, 2, 3, 4, inf]:
        res = x.renorm(p, 1, 1)
        expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
        self.assertEqual(res, expected, msg="renorm failed for {}-norm".format(p))
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
@dtypes(*floating_and_complex_types())
def test_householder_product(self, device, dtype):
    """Verify torch.linalg.householder_product reconstructs Q from geqrf-style
    reflectors/tau, for single matrices, batches, and empty inputs."""
    def generate_reflectors_and_tau(A):
        """
        This function uses numpy.linalg.qr with mode "raw" to extract output of LAPACK's geqrf.
        There is torch.geqrf function but it doesn't work with complex-valued input.
        """
        if A.numel() > 0:
            A_cpu = A.cpu()
            flattened_batch_shape = [-1, *A_cpu.shape[-2:]]
            reflectors = torch.empty_like(A_cpu).view(*flattened_batch_shape)
            tau_shape = [*A_cpu.shape[:-2], A_cpu.shape[-1]]
            tau = torch.empty(tau_shape, dtype=dtype).view(-1, A_cpu.shape[-1])
            # Run numpy's geqrf per batch element; mode='raw' returns the
            # Householder reflectors in transposed (Fortran) layout.
            for A_i, reflectors_i, tau_i in zip(A_cpu.contiguous().view(*flattened_batch_shape), reflectors, tau):
                reflectors_tmp, tau_i[:] = map(torch.from_numpy, np.linalg.qr(A_i, mode='raw'))
                reflectors_i[:] = reflectors_tmp.T
            reflectors = reflectors.view(*A_cpu.shape)
            tau = tau.view(tau_shape)
            return reflectors.to(A.device), tau.to(A.device)
        # Empty input: shapes only, contents are irrelevant.
        reflectors = torch.empty_like(A)
        tau = torch.empty(*A.shape[:-2], A.shape[-1], dtype=dtype, device=device)
        return reflectors, tau
    def run_test(shape):
        A = torch.randn(*shape, dtype=dtype, device=device)
        reflectors, tau = generate_reflectors_and_tau(A)
        expected, _ = torch.linalg.qr(A)
        actual = torch.linalg.householder_product(reflectors, tau)
        # torch.linalg.qr does not work correctly for zero batch dimension tensors
        # see https://github.com/pytorch/pytorch/issues/50576
        if (A.numel() > 0):
            self.assertEqual(expected, actual)
        else:
            self.assertTrue(actual.shape == shape)
        # if tau is empty and A is not the result should be a matrix with ones on the diagonal
        if (A.numel() > 0):
            tau_empty = torch.empty(*shape[:-2], 0, dtype=dtype, device=device)
            identity_mat = torch.zeros_like(reflectors)
            identity_mat.diagonal(dim1=-1, dim2=-2)[:] = 1
            actual = torch.linalg.householder_product(reflectors, tau_empty)
            self.assertEqual(actual, identity_mat)
        # The out= variant must return its out argument and match.
        out = torch.empty_like(A)
        ans = torch.linalg.householder_product(reflectors, tau, out=out)
        self.assertEqual(ans, out)
        if (A.numel() > 0):
            self.assertEqual(expected, out)
    shapes = [(0, 0), (5, 0),  # Empty matrix
              (5, 5), (5, 3),  # Single matrix
              (0, 0, 0), (0, 5, 5), (0, 5, 3),  # Zero batch dimension tensors
              (2, 5, 5), (2, 5, 3),  # 3-dim tensors
              (2, 1, 5, 5), (2, 1, 5, 3)]  # 4-dim tensors
    for shape in shapes:
        run_test(shape)
@skipCPUIfNoLapack
@skipCUDAIfNoCusolver
def test_householder_product_errors_and_warnings(self, device):
    """Exercise the error and warning paths of torch.linalg.householder_product:
    bad shapes, out= resize warning, dtype mismatches, and device mismatches."""
    test_cases = [
        # input1 size, input2 size, error regex
        ((10,), (2,), r"input must have at least 2 dimensions"),
        ((10, 6), (20,), r"input.shape\[-1\] must be greater than or equal to tau.shape\[-1\]"),
        ((6, 10), (5,), r"input.shape\[-2\] must be greater than or equal to input.shape\[-1\]"),
    ]
    for a_size, tau_size, error_regex in test_cases:
        a = torch.rand(*a_size, device=device)
        tau = torch.rand(*tau_size, device=device)
        with self.assertRaisesRegex(RuntimeError, error_regex):
            torch.linalg.householder_product(a, tau)
    # if out tensor with wrong shape is passed a warning is given
    reflectors = torch.randn(3, 3, device=device)
    tau = torch.randn(3, device=device)
    out = torch.empty(2, 3, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.linalg.householder_product(reflectors, tau, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    # dtypes should be safely castable
    out = torch.empty_like(reflectors).to(torch.int)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.linalg.householder_product(reflectors, tau, out=out)
    with self.assertRaisesRegex(RuntimeError, "tau dtype Int does not match input dtype"):
        torch.linalg.householder_product(reflectors, tau.to(torch.int))
    if torch.cuda.is_available():
        # device of out and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty_like(reflectors).to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau, out=out)
        # device of tau and input should match
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        tau = tau.to(wrong_device)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.linalg.householder_product(reflectors, tau)
@precisionOverride({torch.float32: 1e-4, torch.complex64: 1e-4})
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_linalg_lu_factor_and_lu(self, device, dtype):
    # Tests lu, linalg.lu_factor and linalg.lu_factor_ex
    from torch.testing._internal.common_utils import random_matrix
    def run_test(A, pivot, singular, fn):
        """Factor A with fn and check shapes, pivots, and P @ L @ U == A."""
        k = min(A.shape[-2:])
        batch = A.shape[:-2]
        # Only torch.linalg.lu_factor raises on singular input; the other
        # two variants report failure through an `info` tensor instead.
        check_errors = (fn == torch.linalg.lu_factor)
        if singular and check_errors:
            # It may or may not throw as the LU decomposition without pivoting
            # may still succeed for singular matrices
            try:
                LU, pivots = fn(A, pivot=pivot)
            except RuntimeError:
                return
        else:
            LU, pivots = fn(A, pivot=pivot)[:2]
        self.assertEqual(LU.size(), A.shape)
        self.assertEqual(pivots.size(), batch + (k,))
        if not pivot:
            # Without pivoting the pivot vector must be the identity permutation.
            self.assertEqual(pivots, torch.arange(1, 1 + k, device=device, dtype=torch.int32).expand(batch + (k, )))
        P, L, U = torch.lu_unpack(LU, pivots)
        self.assertEqual(P @ L @ U, A)
    sizes = ((3, 3), (5, 5), (4, 2), (3, 4), (0, 0), (0, 1), (1, 0))
    batches = ((0,), (2,), (3,), (1, 0), (3, 5))
    # Non pivoting just implemented for CUDA
    pivots = (True, False) if self.device_type == "cuda" else (True,)
    fns = (partial(torch.lu, get_infos=True), torch.linalg.lu_factor, torch.linalg.lu_factor_ex)
    for ms, batch, pivot, singular, fn in itertools.product(sizes, batches, pivots, (True, False), fns):
        m, n = ms
        A = random_matrix(m, n, *batch, singular=singular, dtype=dtype, device=device)
        # Just do one of them on singular matrices
        if A.numel() == 0 and not singular:
            continue
        run_test(A, pivot, singular, fn)
        # Reproducer of a magma bug,
        # see https://bitbucket.org/icl/magma/issues/13/getrf_batched-kernel-produces-nans-on
        # This is also a bug in cuSOLVER < 11.3
        if (dtype == torch.double
            and singular
            and (torch.version.cuda is None or
                 torch.version.cuda.split('.') >= ["11", "3"])):
            A = torch.ones(batch + ms, dtype=dtype, device=device)
            run_test(A, pivot, singular, fn)
    # Info should be positive for rank deficient matrices
    A = torch.ones(5, 3, 3, device=device)
    self.assertTrue((torch.linalg.lu_factor_ex(A, pivot=True).info >= 0).all())
    if self.device_type == 'cpu':
        # Error checking, no pivoting variant on CPU
        with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
            torch.lu(torch.empty(1, 2, 2), pivot=False)
        with self.assertRaisesRegex(RuntimeError, 'LU without pivoting is not implemented on the CPU'):
            torch.linalg.lu_factor(torch.empty(1, 2, 2), pivot=False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
@skipCUDAIfRocm
@precisionOverride({torch.float: 1e-3})
def test_lu_unpack(self, device, dtype):
    """Check that torch.lu_unpack reconstructs the original matrix:
    P @ L @ U == A, for square, rectangular, batched, and empty inputs."""
    def run_test(pivot):
        for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3)):
            a = torch.randn(*shape, dtype=dtype, device=device)
            a_lu, p = torch.lu(a, pivot=pivot)
            p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
            self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)
        # Wider shape coverage, bounded values to keep factorization stable.
        for shape in ((3, 3), (5, 3, 3), (7, 3, 5, 5), (7, 5, 3, 3, 3),
                      (3, 5), (5, 3), (3, 3, 5), (3, 5, 3),
                      (7, 5, 3, 5, 3), (7, 5, 3, 3, 5),
                      # empty tensors
                      (0, 0), (0, 0, 0), (0, 3, 3)
                      ):
            a = make_tensor(shape, dtype=dtype, device=device, low=-0.1, high=+0.1)
            a_lu, p = torch.lu(a, pivot=pivot)
            p_ref, l_ref, u_ref = torch.lu_unpack(a_lu, p)
            self.assertEqual(p_ref.matmul(l_ref.matmul(u_ref)), a)
    run_test(True)
    # The no-pivot path is only implemented on CUDA.
    if self.device_type == 'cuda':
        run_test(False)
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(torch.double)
def test_lu_unpack_check_input(self, device, dtype):
    """Validate torch.lu_unpack input checking (pivot dtype/contiguity) and
    that disabled unpack flags yield None outputs."""
    x = torch.rand(5, 5, 5, device=device, dtype=dtype)
    lu_data, lu_pivots = torch.lu(x, pivot=True)
    # Pivots must be int32.
    with self.assertRaisesRegex(RuntimeError, "torch.int32 dtype"):
        torch.lu_unpack(lu_data, lu_pivots.long())
    # Pivots must be contiguous (.mT produces a non-contiguous view).
    with self.assertRaisesRegex(RuntimeError, "contiguous tensor"):
        torch.lu_unpack(lu_data, lu_pivots.mT)
    # check that once flags are unset, Nones are returned
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False)
    self.assertTrue((l == u) and l is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_pivots=False)
    self.assertTrue(p is None)
    p, l, u = torch.lu_unpack(lu_data, lu_pivots, unpack_data=False, unpack_pivots=False)
    self.assertTrue((p == l == u) and p is None)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_basic(self, device, dtype):
    """Run the shared LOBPCG checks using the 'basic' iteration method."""
    self._test_lobpcg_method(device, dtype, method='basic')
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
@skipCUDAIfRocm
def test_lobpcg_ortho(self, device, dtype):
    """Run the shared LOBPCG checks using the 'ortho' iteration method."""
    self._test_lobpcg_method(device, dtype, method='ortho')
def _test_lobpcg_method(self, device, dtype, method):
    """Shared driver for LOBPCG tests.

    Solves classical (A x = lambda x) and generalized (A x = lambda B x)
    eigenproblems with torch.lobpcg for dense and sparse symmetric
    positive-definite inputs, checking convergence via a tracker callback
    and comparing against torch.symeig / known eigenvalues.

    ``method`` selects the LOBPCG variant ('basic' or 'ortho').
    """
    from torch.testing._internal.common_utils import random_symmetric_pd_matrix, random_sparse_pd_matrix
    from torch._linalg_utils import matmul, qform
    from torch._lobpcg import lobpcg
    def test_tracker(worker):
        # Invoked by lobpcg on every iteration; only validate once all k
        # requested eigenpairs have converged.
        k = worker.iparams['k']
        nc = worker.ivars['converged_count']
        if k <= nc:
            tol = worker.fparams['tol']
            rerr = worker.tvars['rerr']
            X = worker.X
            E = worker.E
            B = worker.B
            A = worker.A
            dtype = X.dtype
            device = X.device
            # Check convergence
            self.assertLessEqual(rerr[:k].max(), tol)
            # Check B-orthogonality
            I = torch.eye(k, k, dtype=dtype, device=device)
            self.assertEqual(qform(B, X[:, :k]), I)
            # Check block equation
            self.assertEqual(qform(A, X[:, :k]) / E[:k], I, atol=0.2, rtol=0)
    orig_lobpcg = lobpcg
    # Wrap lobpcg so every call below runs with the tracker and fixed
    # method/niter/tol settings.
    def lobpcg(*args, **kwargs):
        kwargs['tracker'] = test_tracker
        kwargs['niter'] = 1000
        kwargs['method'] = method
        kwargs['tol'] = 1e-8
        return orig_lobpcg(*args, **kwargs)
    prec = 5e-4
    # check dense input
    mm = torch.matmul
    for batches in [(), (2,), (2, 3)]:
        for m, n, k in [
                (9, 3, 1),
                (9, 3, 2),
                (9, 2, 2),
                (100, 15, 5),
        ]:
            # skip tests that are known to fail with the basic
            # LOBPCG method due to calling cholesky on singular
            # input
            if method == 'basic' and (m, n, k) in [(9, 2, 2), (100, 15, 5)]:
                continue
            A = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
            B = random_symmetric_pd_matrix(m, *batches, device=device, dtype=dtype)
            # classical eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=False)
            self.assertEqual(E.shape, batches + (k,))
            self.assertEqual(V.shape, batches + (m, k))
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            e = torch.symeig(A)[0]
            e_smallest = e[..., :k]
            self.assertEqual(E, e_smallest)
            # classical eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, k=k, n=n, largest=True)
            e_largest, _ = torch.sort(e[..., -k:], descending=True)
            self.assertEqual(E, e_largest, atol=prec, rtol=0)
            self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
            # generalized eigenvalue problem, smallest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
            self.assertEqual(matmul(A, V), mm(matmul(B, V), E.diag_embed()), atol=prec, rtol=0)
            # generalized eigenvalue problem, largest eigenvalues
            E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
            self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                             atol=prec, rtol=0)
    # check sparse input
    for m, n, k, density in [
            (5, 1, 1, 0.8),
            (9, 3, 2, 0.5),
            (100, 1, 1, 0.1),
            (1000, 7, 3, 0.01),
    ]:
        # skip tests that are known to fail with the basic LOBCG
        # method due to insufficient accuracy
        if method == 'basic' and (m, n, k, density) in [(1000, 7, 3, 0.01)]:
            continue
        A = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        B = random_sparse_pd_matrix(m, density=density, device=device, dtype=dtype)
        # random_sparse_pd_matrix has known eigenvalues i/m for i=1..m.
        A_eigenvalues = torch.arange(1, m + 1, dtype=dtype) / m
        e_smallest = A_eigenvalues[..., :k]
        e_largest, _ = torch.sort(A_eigenvalues[..., -k:], descending=True)
        # classical eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=False)
        self.assertEqual(E, e_smallest)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
        # classical eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V), mm(V, E.diag_embed()), atol=prec, rtol=0)
        self.assertEqual(E, e_largest)
        # generalized eigenvalue problem, smallest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=False)
        self.assertEqual(matmul(A, V), matmul(B, mm(V, E.diag_embed())), atol=prec, rtol=0)
        # generalized eigenvalue problem, largest eigenvalues
        E, V = lobpcg(A, B=B, k=k, n=n, largest=True)
        self.assertEqual(matmul(A, V) / E.max(), mm(matmul(B, V), (E / E.max()).diag_embed()),
                         atol=prec, rtol=0)
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_torchscript(self, device, dtype):
    """Check that torch.lobpcg can be scripted with torch.jit and that the
    scripted version still solves the eigenproblem accurately."""
    from torch.testing._internal.common_utils import random_sparse_pd_matrix
    from torch._linalg_utils import matmul as mm
    lobpcg = torch.jit.script(torch.lobpcg)
    size = 500
    num_eig = 5
    A = random_sparse_pd_matrix(size, density=2.0 / size, device=device, dtype=dtype)
    X = torch.randn((size, num_eig), dtype=dtype, device=device)
    evals, evecs = lobpcg(A, X=X)
    # Relative residual of the eigen-equation A V = V diag(E).
    residual = torch.norm((mm(A, evecs) - evecs * evals), 2) / evals.max()
    self.assertLess(residual, 1e-6)
@unittest.skipIf(not TEST_SCIPY or (TEST_SCIPY and scipy.__version__ < '1.4.1'), "Scipy not found or older than 1.4.1")
@skipCPUIfNoLapack
@onlyCPU
@dtypes(torch.double)
def test_lobpcg_scipy(self, device, dtype):
    """Compare torch and scipy.sparse.linalg implementations of lobpcg
    """
    import time
    from torch.testing._internal.common_utils import random_sparse_pd_matrix
    from torch._linalg_utils import matmul as mm
    from scipy.sparse.linalg import lobpcg as scipy_lobpcg
    import scipy.sparse
    def toscipy(A):
        # Convert a torch tensor (dense or sparse COO) to the scipy equivalent.
        if A.layout == torch.sparse_coo:
            values = A.coalesce().values().cpu().numpy().copy()
            indices = A.coalesce().indices().cpu().numpy().copy()
            return scipy.sparse.coo_matrix((values, (indices[0], indices[1])), A.shape)
        return A.cpu().numpy().copy()
    niter = 1000
    repeat = 10
    m = 500   # size of the square matrix
    k = 7     # the number of requested eigenpairs
    A1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
    B1 = random_sparse_pd_matrix(m, density=2.0 / m, device=device, dtype=dtype)
    X1 = torch.randn((m, k), dtype=dtype, device=device)
    A2 = toscipy(A1)
    B2 = toscipy(B1)
    X2 = toscipy(X1)
    lambdas1 = []
    # Tracker records one eigenvalue snapshot per iteration so iteration
    # counts can be compared against scipy's retLambdaHistory.
    def tracker(worker):
        lambdas1.append(worker.E[:])
    tol = 1e-8
    # tol for scipy lobpcg will be chosen so that the number of
    # iterations will be equal or very close to pytorch lobpcg
    # (that is around 170-180)
    # Standard eigenvalue problem
    E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=1.1 * tol)
    iters1 = len(lambdas1)
    iters2 = len(lambdas2)
    self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
    E2a, V2a = scipy_lobpcg(A2, X2, maxiter=niter, largest=False)
    eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
    eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
    self.assertLess(eq_err, 1e-6)        # std
    self.assertLess(eq_err_scipy, 1e-6)  # std
    self.assertEqual(E1, torch.from_numpy(E2.copy()))
    # Generalized eigenvalue problem
    lambdas1 = []
    def tracker(worker):
        lambdas1.append(worker.E[:])
    E1, V1 = torch.lobpcg(A1, B=B1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=39 * tol)
    E2a, V2a = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=False)
    iters1 = len(lambdas1)
    iters2 = len(lambdas2)
    self.assertLess(abs(iters1 - iters2), 0.05 * max(iters1, iters2))
    eq_err = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
    eq_err_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
    self.assertLess(eq_err, 1e-6)        # general
    self.assertLess(eq_err_scipy, 1e-6)  # general
    self.assertEqual(E1, torch.from_numpy(E2.copy()))
    # Timings
    elapsed_ortho = 0
    elapsed_ortho_general = 0
    elapsed_scipy = 0
    elapsed_general_scipy = 0
    for i in range(repeat):
        start = time.time()
        torch.lobpcg(A1, X=X1, niter=niter, method='ortho', tol=tol)
        end = time.time()
        elapsed_ortho += end - start
        start = time.time()
        torch.lobpcg(A1, X=X1, B=B1, niter=niter, method='ortho', tol=tol)
        end = time.time()
        elapsed_ortho_general += end - start
        start = time.time()
        scipy_lobpcg(A2, X2, maxiter=niter, tol=1.1 * tol)
        end = time.time()
        elapsed_scipy += end - start
        start = time.time()
        scipy_lobpcg(A2, X2, B=B2, maxiter=niter, tol=39 * tol)
        end = time.time()
        elapsed_general_scipy += end - start
    elapsed_ortho_ms = 1000.0 * elapsed_ortho / repeat
    elapsed_ortho_general_ms = 1000.0 * elapsed_ortho_general / repeat
    elapsed_scipy_ms = 1000.0 * elapsed_scipy / repeat
    elapsed_general_scipy_ms = 1000.0 * elapsed_general_scipy / repeat
    print('''
CPU timings: torch.lobpcg vs scipy.sparse.linalg.lobpcg
-------------------------------------------------------
| standard | generalized | method
torch.lobpcg | {:10.2f} | {:10.2f} | ortho
scipy_lobpcg | {:10.2f} | {:10.2f} | N/A
-(input size: {:4}, eigenpairs:{:2}, units: ms per call)-
'''.format(elapsed_ortho_ms, elapsed_ortho_general_ms,
           elapsed_scipy_ms, elapsed_general_scipy_ms,
           m, k))
    # Handling of very small tolerance
    tol = 1e-100
    lambdas1 = []
    def tracker(worker):
        lambdas1.append(worker.E[:])
    E1, V1 = torch.lobpcg(A1, X=X1, niter=niter, largest=True, tracker=tracker, tol=tol)
    iters1 = len(lambdas1)
    eq_err = torch.norm((mm(A1, V1) - V1 * E1), 2) / E1.max()
    # scipy may fail outright at this tolerance; record -1 markers instead.
    try:
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
        iters2 = len(lambdas2)
        eq_err_scipy = (abs(A2.dot(V2) - V2 * E2)**2).sum() ** 0.5 / E2.max()
    except Exception as msg:
        print('Calling scipy_lobpcg failed [standard]:', msg)
        iters2 = -1
        eq_err_scipy = -1
    lambdas1 = []
    def tracker(worker):
        lambdas1.append(worker.E[:])
    E1, V1 = torch.lobpcg(A1, X=X1, B=B1, niter=niter, largest=True, tracker=tracker, tol=tol)
    iters1_general = len(lambdas1)
    eq_err_general = torch.norm((mm(A1, V1) - mm(B1, V1) * E1), 2) / E1.max()
    try:
        E2, V2, lambdas2 = scipy_lobpcg(A2, X2, B=B2, maxiter=niter, largest=True, retLambdaHistory=True, tol=tol)
        iters2_general = len(lambdas2)
        eq_err_general_scipy = (abs(A2.dot(V2) - B2.dot(V2) * E2)**2).sum() ** 0.5 / E2.max()
    except Exception as msg:
        print('Calling scipy_lobpcg failed [generalized]:', msg)
        iters2_general = -1
        eq_err_general_scipy = -1
    print('''\
Handling of small tol={:6.0e}: torch.lobpcg vs scipy.sparse.linalg.lobpcg
----------------------------------------------------------------------------
| standard | generalized | niter | method
torch.lobpcg | {:10.2e} | {:10.2e} | {:6} | ortho
scipy_lobpcg | {:10.2e} | {:10.2e} | {:6} | N/A
---(input size: {:4}, eigenpairs:{:2}, units: relative error, maxiter={:4})---
'''.format(tol, eq_err, eq_err_general, iters1, eq_err_scipy, eq_err_general_scipy, iters2, m, k, niter))
def _test_addmm_addmv(self, f, t, m, v, *, alpha=None, beta=None, transpose_out=False):
    """Shared checker for torch.addmm / torch.addmv.

    Computes ``f(t, m, v, alpha=alpha, beta=beta)`` three ways — plain call,
    call with a NaN-prefilled (optionally transposed) ``out=``, and a numpy
    reference ``alpha * (m @ v) + beta * t`` — and asserts all agree.
    """
    dtype = t.dtype
    numpy_dtype = dtype
    # bfloat16 is upcast to float for the numpy reference computation.
    if dtype in {torch.bfloat16}:
        numpy_dtype = torch.float
    if dtype.is_complex:
        alpha = 0.9 + 0.3j if alpha is None else alpha
        beta = 0.5 + 0.6j if beta is None else beta
    else:
        alpha = 1.2 if alpha is None else alpha
        beta = 0.8 if beta is None else beta
    res1 = f(t, m, v, alpha=alpha, beta=beta)
    # Prefill out with NaN so stale values would be detected.
    res2 = torch.full_like(res1, math.nan)
    if transpose_out:
        res2 = res2.t().clone(memory_format=torch.contiguous_format).t()
    f(t, m, v, alpha=alpha, beta=beta, out=res2)
    res3 = alpha * (m.to(numpy_dtype).cpu().numpy() @ v.to(numpy_dtype).cpu().numpy())
    # beta == 0 must ignore t entirely (even if t contains NaN).
    if beta != 0:
        res3 += (beta * t).to(numpy_dtype).cpu().numpy()
    res3 = torch.from_numpy(res3).to(dtype)
    self.assertEqual(res1, res2)
    self.assertEqual(res1, res3)
@precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
                    torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*get_all_complex_dtypes(),
              *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)),
                                 include_half=(not TEST_WITH_ROCM)))
@dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
def test_addmv(self, device, dtype):
    """Exercise torch.addmv over contiguous, expanded (0-strided), and
    transposed operands, including the beta=0 NaN-input special case."""
    # have to use torch.randn(...).to(bfloat16) instead of
    # torch.randn(..., dtype=bfloat16). randn does not support
    # bfloat16 yet.
    # "*0.2" to reduce errors for low precision
    ts = [
        0.2 * torch.randn(50, device=device).to(dtype),
        0.2 * torch.randn(1, device=device).to(dtype).expand(50),
    ]
    vs = [
        0.2 * torch.randn(100, device=device).to(dtype),
        0.2 * torch.ones(1, device=device).to(dtype).expand(100),  # to reduce errors for low precision
    ]
    ms = [
        # 0d
        0.2 * torch.ones((), device=device).to(dtype).expand(50, 100),  # to reduce errors for low precision
        # 1d
        0.2 * torch.randn((1, 100), device=device).to(dtype).expand(50, 100),
        # this initialization reduces errors for low precision for broadcasted matrices
        # by making sure that intermediate and result values are exactly representable
        # in low precision type
        0.2 * torch.randint(3, (50, 1), dtype=torch.float, device=device).to(dtype).expand(50, 100),
        # 2d
        0.2 * torch.randn((50, 100), device=device).to(dtype),
        0.2 * torch.randn((100, 50), device=device).to(dtype).t(),
    ]
    for m, v, t in itertools.product(ms, vs, ts):
        self._test_addmm_addmv(torch.addmv, t, m, v)
    # Test beta=0, t=nan
    t = torch.full((50,), math.nan, device=device).to(dtype)
    for m, v in itertools.product(ms, vs):
        self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)
@dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(torch.float, torch.double)
def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
    # tests (o, s)*(s). o is output size, s is summed size.
    """Cover the BLAS gemv parameter space: row-/column-major matrix storage,
    non-unit strides for x and y (incx/incy), and padded leading dimension
    (lda). Storage is NaN-filled so out-of-bounds reads would be detected."""
    o = 5
    s = 3
    a_data = torch.arange(1, o * s + 1, device=device, dtype=dtype).view(o, s)
    x_data = torch.arange(1, s + 1, 1, device=device, dtype=dtype)
    y_data = torch.ones(o, device=device, dtype=dtype)
    control = torch.tensor([15., 33., 51., 69., 87.], device=device, dtype=dtype)
    def _test(row_major, incx, incy, lda_tail):
        # lda_tail pads the leading dimension beyond the logical size.
        if row_major:
            a_storage = torch.full((o, s + lda_tail), float('nan'), device=device, dtype=dtype)
        else:
            a_storage = torch.full((s, o + lda_tail), float('nan'), device=device, dtype=dtype).permute(1, 0)
        a = a_storage[:o, :s].copy_(a_data)
        x_storage = torch.full((s, incx), float('nan'), device=device, dtype=dtype)
        x = x_storage[:, 0].copy_(x_data)
        y_storage = torch.full((o, incy), float('nan'), device=device, dtype=dtype)
        y = y_storage[:, 0].copy_(y_data)
        self._test_addmm_addmv(torch.addmv, y, a, x)
    for row_major, incx, incy, lda_tail in itertools.product((False, True), (1, 2), (1, 2), (0, 1)):
        _test(row_major, incx, incy, lda_tail)
@precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                    torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
@dtypesIfCUDA(*get_all_complex_dtypes(),
              *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
@dtypes(*get_all_complex_dtypes(), *get_all_fp_dtypes())
@tf32_on_and_off(0.05)
def test_addmm(self, device, dtype):
    """Exercise torch.addmm with contiguous, 0-strided (expanded), beta=0
    NaN-input, and all transpose combinations of the three operands."""
    M = torch.randn(10, 25, device=device).to(dtype)
    m1 = torch.randn(10, 50, device=device).to(dtype)
    m2 = torch.randn(50, 25, device=device).to(dtype)
    self._test_addmm_addmv(torch.addmm, M, m1, m2)
    # Test 0-strided
    M = torch.randn(10, 1, device=device).to(dtype).expand(10, 25)
    m1 = torch.randn(10, 1, device=device).to(dtype).expand(10, 50)
    m2 = torch.randn(50, 25, device=device).to(dtype)
    self._test_addmm_addmv(torch.addmm, M, m1, m2)
    # Test beta=0, M=nan
    M = torch.full((10, 25), math.nan, device=device).to(dtype)
    m1 = torch.randn(10, 50, device=device).to(dtype)
    m2 = torch.randn(50, 25, device=device).to(dtype)
    self._test_addmm_addmv(torch.addmm, M, m1, m2, beta=0)
    # Test transpose
    for t1, t2, t3, t4 in itertools.product([True, False], repeat=4):
        def maybe_transpose(cond, m):
            # Round-trip transpose yields a non-contiguous tensor with the
            # same values, forcing the transposed BLAS path.
            if not cond:
                return m
            return m.t().clone(memory_format=torch.contiguous_format).t()
        M = maybe_transpose(t1, torch.randn(10, 25, device=device).to(dtype))
        m1 = maybe_transpose(t2, torch.randn(10, 50, device=device).to(dtype))
        m2 = maybe_transpose(t3, torch.randn(50, 25, device=device).to(dtype))
        self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)
@dtypes(torch.float, torch.double)
@dtypesIfCUDA(*([torch.float, torch.double] + get_all_complex_dtypes()))
@tf32_on_and_off(0.005)
def test_addmm_sizes(self, device, dtype):
    """Run addmm across size combinations including zero dims, and check
    that mismatched inner dimensions raise with a shape-bearing message."""
    for m in [0, 1, 25]:
        for n in [0, 1, 10]:
            for k in [0, 1, 8]:
                M = torch.randn(n, m, device=device).to(dtype)
                m1 = torch.randn(n, k, device=device).to(dtype)
                m2 = torch.randn(k, m, device=device).to(dtype)
                self._test_addmm_addmv(torch.addmm, M, m1, m2)
                # k+1 vs k: inner dimensions no longer match.
                m1 = torch.randn(n, k + 1, device=device).to(dtype)
                m2 = torch.randn(k, m, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.addmm(M, m1, m2))
                self.assertRaisesRegex(RuntimeError, f"{n}x{k + 1}.*{k}x{m}", lambda: torch.mm(m1, m2))
@dtypes(torch.half)
@onlyCUDA
def test_addmm_baddbmm_overflow(self, device, dtype):
    """With fp16 reduced-precision reduction disabled, addmm/baddbmm on
    values that would overflow fp16 accumulation must still produce the
    scaled (alpha=0.001) exact result of 10000."""
    orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
    inp = torch.zeros(128, 128, dtype=torch.half, device=device)
    mat1 = torch.ones(128, 1000, dtype=torch.half, device=device) * 100
    mat2 = torch.ones(1000, 128, dtype=torch.half, device=device) * 100
    out = torch.addmm(inp, mat1, mat2, alpha=0.001, beta=0.)
    # just check for no overflow on ROCM
    if TEST_WITH_ROCM:
        self.assertFalse(out.isinf().any())
    else:
        self.assertTrue((out == 10000.).all())
    # Batched variant of the same check via baddbmm.
    inp = torch.zeros(3, 128, 128, dtype=torch.half, device=device)
    mat1 = torch.ones(3, 128, 1000, dtype=torch.half, device=device) * 100
    mat2 = torch.ones(3, 1000, 128, dtype=torch.half, device=device) * 100
    out = torch.baddbmm(inp, mat1, mat2, alpha=0.001, beta=0.)
    if TEST_WITH_ROCM:
        self.assertFalse(out.isinf().any())
    else:
        self.assertTrue((out == 10000.).all())
    # Restore the global backend flag for subsequent tests.
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = orig
@unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error")
@onlyCUDA
def test_matmul_45724(self, device):
    """Regression test for https://github.com/pytorch/pytorch/issues/45724:
    a large half-precision batched matmul written into a NaN-prefilled
    ``out=`` tensor must match the float reference computed on CPU."""
    lhs = torch.rand(65537, 22, 64, device=device, dtype=torch.half)
    rhs = torch.rand(65537, 64, 22, device=device, dtype=torch.half)
    out = torch.full((65537, 22, 22), math.nan, dtype=torch.half, device=device)
    reference = torch.matmul(lhs.cpu().float(), rhs.cpu().float()).cuda().half()
    torch.matmul(lhs, rhs, out=out)
    self.assertEqual(out, reference)
@slowTest
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64, torch.bfloat16, torch.int32, torch.int64, torch.cfloat, torch.cdouble)
@dtypesIfCUDA(torch.float32, torch.float64, torch.cfloat, torch.cdouble)
@tf32_on_and_off(0.01)
def test_mm(self, device, dtype):
    """Compare torch.mm against a naive Python triple-loop reference over
    contiguous, transposed, and zero-stride operands, with and without out=."""
    def _test_mm(n, m, p, dtype, genf):
        # helper function
        def matrixmultiply(mat1, mat2):
            # O(n*m*p) reference implementation.
            n = mat1.size(0)
            m = mat1.size(1)
            p = mat2.size(1)
            res = torch.zeros(n, p, dtype=dtype, device=device)
            for i, j in iter_indices(res):
                res[i, j] = sum(mat1[i, k] * mat2[k, j] for k in range(m))
            return res
        # contiguous case
        mat1 = genf(n, m)
        mat2 = genf(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # non contiguous case 1
        mat1 = genf(n, m)
        mat2 = genf(p, m).t()
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # non contiguous case 2
        mat1 = genf(m, n).t()
        mat2 = genf(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # non contiguous case 3
        mat1 = genf(m, n).t()
        mat2 = genf(p, m).t()
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # test with zero stride
        mat1 = genf(n, m)
        mat2 = genf(m, 1).expand(m, p)
        res = torch.mm(mat1, mat2)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # explicitly exercise the _out variant in torch.mm().
        # contiguous case
        mat1 = genf(n, m)
        mat2 = genf(m, p)
        res = genf(n, p)
        torch.mm(mat1, mat2, out=res)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
        # explicitly exercise the _out variant in torch.mm().
        # non contiguous case 3
        mat1 = genf(m, n).t()
        mat2 = genf(p, m).t()
        res = genf(n, p)
        torch.mm(mat1, mat2, out=res)
        res2 = matrixmultiply(mat1, mat2)
        self.assertEqual(res, res2)
    # Generators chosen per dtype so values stay exactly representable.
    def genf_int(x, y):
        return torch.randint(0, 100, (x, y), dtype=dtype, device=device)
    def genf_bfloat(x, y):
        return torch.randn(x, y, dtype=torch.float32, device=device).to(dtype) * 0.1
    def genf_float(x, y):
        return torch.randn(x, y, dtype=dtype, device=device)
    for (n, m, p) in [(20, 10, 15), (15, 20, 10), (25, 18, 10)]:
        if (dtype == torch.int32) or (dtype == torch.int64):
            genf = genf_int
        elif (dtype == torch.bfloat16):
            genf = genf_bfloat
        else:
            genf = genf_float
        _test_mm(n, m, p, dtype, genf)
    @onlyNativeDeviceTypes
    def test_mm_bmm_non_memory_dense(self, device):
        """Check mm/bmm on non-memory-dense (step-2 sliced) conjugate views.

        A lazily conjugated view (torch.conj) must produce the same result as
        a materialized conjugate (torch.conj_physical), including with
        transposed inputs, `out=` tensors with transposed layout, and
        batch broadcasting.
        """
        def _slice(tensor, fn):
            # Every other column -> non-memory-dense view of fn(tensor).
            return fn(tensor)[..., ::2]
        A = torch.randn(3, 6, dtype=torch.cfloat, device=device)
        B = torch.randn(3, 3, dtype=torch.cfloat, device=device)
        out = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
        out1 = torch.empty(3, 3, device=device, dtype=torch.complex64).t()
        A_conj = _slice(A, torch.conj)
        A_conj_physical = _slice(A, torch.conj_physical)
        self.assertEqual(torch.mm(A_conj, B, out=out), torch.mm(A_conj_physical, B, out=out))
        self.assertEqual(torch.mm(A_conj.t(), B, out=out), torch.mm(A_conj_physical.t(), B, out=out))
        Ab = torch.randn(2, 3, 6, dtype=torch.cfloat, device=device)
        Bb = torch.randn(2, 3, 3, dtype=torch.cfloat, device=device)
        Bb_ = torch.randn(1, 3, 3, dtype=torch.cfloat, device=device).expand(2, 3, 3)
        out_b = torch.empty(2, 3, 3, device=device, dtype=torch.complex64).mT
        Ab_conj = _slice(Ab, torch.conj)
        Ab_conj_physical = _slice(Ab, torch.conj_physical)
        def t_b(tensor):
            # Batch transpose (swap the last two dims).
            return tensor.mT
        self.assertEqual(torch.bmm(Ab_conj, Bb, out=out_b), torch.bmm(Ab_conj_physical, Bb, out=out_b))
        self.assertEqual(torch.bmm(t_b(Ab_conj), Bb, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb, out=out_b))
        # test broadcasting
        self.assertEqual(torch.bmm(Ab_conj, Bb_, out=out_b), torch.bmm(Ab_conj_physical, Bb_, out=out_b))
        self.assertEqual(torch.bmm(t_b(Ab_conj), Bb_, out=out_b), torch.bmm(t_b(Ab_conj_physical), Bb_, out=out_b))
@onlyNativeDeviceTypes
@dtypes(torch.float32, torch.float64)
def test_strided_mm_bmm(self, device, dtype):
# Tests strided view case with stride smaller than corresponding dimension size
x = torch.tensor([[1., 2., 3.], [4., 5., 6.]], dtype=dtype, device=device)
new_shape = [2, 2, 2]
new_stride = [3, 1, 1]
sx = torch.as_strided(x, size=new_shape, stride=new_stride)
torch_fn = lambda x: torch.bmm(x, x) # noqa: E731
np_fn = lambda x: np.matmul(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx)
torch_fn = lambda x: torch.mm(x, x) # noqa: E731
self.compare_with_numpy(torch_fn, np_fn, sx[0])
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @skipCUDAIf(torch.version.cuda == "10.1", "flaky on CUDA 10.1")
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_bmm(self, device, dtype):
        """Validate torch.bmm against NumPy's batched matmul.

        Covers transposed (non-contiguous), broadcast-expanded, and
        zero-sized inputs, plus the `out=` variant into permuted layouts.
        On CUDA, also checks that mixed CPU/CUDA arguments are rejected.
        """
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        batch_sizes = [1, 10]
        M, N, O = 23, 15, 12
        # bfloat16 results are compared against a float32 NumPy reference.
        numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # Unsupported configurations must raise rather than silently fail.
            for num_batches in batch_sizes:
                b1 = torch.randn(num_batches, M, N, device=device).to(dtype)
                b2 = torch.randn(num_batches, N, O, device=device).to(dtype)
                self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                       lambda: torch.bmm(b1, b2))
            return
        def invert_perm(p):
            # Inverse of a permutation of (0, 1, 2).
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_inputs(num_batches):
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-0.1, high=0.1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-0.1, high=0.1)
                # permute + contiguous + inverse permute -> same values,
                # non-contiguous memory layout.
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                yield b1, b2
            # broadcasting tensors
            for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1)
                shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-0.1, high=0.1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-0.1, high=0.1).expand(num_batches, N, O)
                yield b1, b2
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = torch.randn(shape1, dtype=dtype, device=device)
                b2 = torch.randn(shape2, dtype=dtype, device=device)
                yield b1, b2
        for num_batches in batch_sizes:
            for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))):
                res1 = torch.bmm(b1, b2)
                # `out=` tensor pre-filled with NaN so stale values are caught.
                res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \
                    .permute(perm3).contiguous().permute(invert_perm(perm3))
                torch.bmm(b1, b2, out=res2)
                expect = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                self.assertEqual(expect, res1)
                self.assertEqual(expect, res2)
        if self.device_type == 'cuda':
            # check that mixed arguments are rejected
            self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2.cpu()))
            self.assertRaises(RuntimeError, lambda: torch.bmm(b1.cpu(), b2))
            self.assertRaises(RuntimeError, lambda: torch.bmm(b1, b2, out=res2.cpu()))
def _test_addbmm_baddbmm(self, func, b1, b2, ref, out_tensor):
getattr(out_tensor, func + "_")(b1, b2)
self.assertEqual(out_tensor, ref)
res3 = out_tensor.clone()
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1, b1, b2)
self.assertEqual(out_tensor, ref * 2),
getattr(res3, func + "_")(b1, b2, beta=1)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func}_ is deprecated"):
getattr(out_tensor, func + "_")(1., .5, b1, b2)
self.assertEqual(out_tensor, ref * 2.5)
getattr(res3, func + "_")(b1, b2, beta=1., alpha=.5)
self.assertEqual(out_tensor, res3)
with self.assertWarnsOnceRegex(
UserWarning, f"This overload of {func} is deprecated"):
self.assertEqual(out_tensor, getattr(torch, func)(1, out_tensor, 0, b1, b2))
res4 = getattr(torch, func)(out_tensor, b1, b2, beta=1, alpha=.5)
self.assertEqual(res4, ref * 3),
nan = torch.full_like(out_tensor, math.nan)
res5 = getattr(torch, func)(nan, b1, b2, beta=0, alpha=1)
self.assertEqual(res5, ref)
if b1.is_complex():
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1j, alpha=.5j)
self.assertEqual(res6, out_tensor * .1j + .5j * ref)
else:
res6 = getattr(torch, func)(out_tensor, b1, b2, beta=.1, alpha=.5)
self.assertEqual(res6, out_tensor * .1 + .5 * ref)
res7 = torch.full_like(out_tensor, math.nan)
getattr(torch, func)(nan, b1, b2, beta=0, out=res7)
self.assertEqual(res7, ref)
    @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_addbmm(self, device, dtype):
        """Validate torch.addbmm via the shared _test_addbmm_baddbmm driver.

        The NumPy-based reference is the batched product summed over the
        batch dimension. Covers transposed, broadcast-expanded, and
        zero-sized inputs.
        """
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 2
        M, N, O = 16, 17, 18
        is_supported = True
        if dtype == torch.bfloat16:
            if self.device_type == 'cpu':
                self.precision = 1  # 43 vs 43.75
            else:
                is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # Unsupported configurations must raise rather than silently fail.
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.addbmm(t, b1, b2))
            return
        def invert_perm(p):
            # Inverse of a permutation of (0, 1, 2).
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_tensor():
            # bfloat16 results are compared against a float32 NumPy reference.
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors
            for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2):
                for perm3 in itertools.permutations((0, 1)):
                    b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1) * 0.1
                    b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1) * 0.1
                    b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                    b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                    # addbmm reference: batched matmul summed over the batch dim.
                    ref = torch.from_numpy(
                        b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                    ).to(device=device, dtype=dtype).sum(0)
                    out_tensor = torch.zeros_like(ref).permute(perm3).contiguous().permute(perm3)
                    yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1) * 0.1
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1) * 0.1
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()
                ).to(device=device, dtype=dtype).sum(0)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("addbmm", b1, b2, ref, out_tensor)
    @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
    @onlyNativeDeviceTypes
    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
    @tf32_on_and_off(0.05)
    def test_baddbmm(self, device, dtype):
        """Validate torch.baddbmm via the shared _test_addbmm_baddbmm driver.

        The NumPy-based reference is the per-batch matmul (no batch-sum,
        unlike addbmm). Covers transposed, broadcast-expanded, and
        zero-sized inputs.
        """
        if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
            # cuBLAS does not guarantee BFloat16 support on SM < 53.
            # So on PyTorch, we consider BFloat16 support on SM < 53 as
            # undefined behavior
            return
        num_batches = 10
        M, N, O = 12, 8, 50
        is_supported = True
        if dtype == torch.bfloat16 and self.device_type == 'cuda':
            is_supported = TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)
        if not is_supported:
            # Unsupported configurations must raise rather than silently fail.
            b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
            b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
            t = make_tensor((num_batches, M, O), device, dtype, low=-1, high=1)
            self.assertRaisesRegex(RuntimeError, "type|Type|not implemented|CUBLAS_STATUS_NOT_SUPPORTED",
                                   lambda: torch.baddbmm(t, b1, b2))
            return
        def invert_perm(p):
            # Inverse of a permutation of (0, 1, 2).
            d = {x: i for i, x in enumerate(p)}
            return (d[0], d[1], d[2])
        def generate_tensor():
            # bfloat16 results are compared against a float32 NumPy reference.
            numpy_dtype = dtype if dtype != torch.bfloat16 else torch.float32
            # transposed tensors
            for perm1, perm2, perm3 in itertools.product(itertools.permutations((0, 1, 2)), repeat=3):
                b1 = make_tensor((num_batches, M, N), device, dtype, low=-1, high=1)
                b2 = make_tensor((num_batches, N, O), device, dtype, low=-1, high=1)
                b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1))
                b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2))
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                out_tensor = out_tensor.permute(perm3).contiguous().permute(invert_perm(perm3))
                yield b1, b2, ref, out_tensor
            # broadcasting tensors
            for s1, s2, s3, s4, s5, s6 in itertools.product((True, False), repeat=6):
                shape1 = (num_batches if s1 else 1, M if s2 else 1, N if s3 else 1)
                shape2 = (num_batches if s4 else 1, N if s5 else 1, O if s6 else 1)
                b1 = make_tensor(shape1, device, dtype, low=-1, high=1).expand(num_batches, M, N)
                b2 = make_tensor(shape2, device, dtype, low=-1, high=1).expand(num_batches, N, O)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
            # zero-sized tensors
            for z1, z2, z3, z4 in itertools.product((True, False), repeat=4):
                shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0)
                shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0)
                b1 = make_tensor(shape1, device, dtype, low=-2, high=2)
                b2 = make_tensor(shape2, device, dtype, low=-2, high=2)
                ref = torch.from_numpy(
                    b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype)
                out_tensor = torch.zeros_like(ref)
                yield b1, b2, ref, out_tensor
        for b1, b2, ref, out_tensor in generate_tensor():
            self._test_addbmm_baddbmm("baddbmm", b1, b2, ref, out_tensor)
    # TODO: update to compare against NumPy
    @onlyCUDA
    def test_solve_methods_arg_device(self, device):
        """Check that solve-family ops reject operands on mismatched devices.

        NOTE(review): uses deprecated APIs (torch.solve, torch.lu_solve,
        torch.triangular_solve) — keep in sync with their removal schedule.
        """
        for b_device, A_device in itertools.product(['cpu', device], repeat=2):
            # Only exercise the mismatched-device combinations.
            if b_device == A_device:
                continue
            b = torch.randn(3, 1, device=b_device)
            A = torch.randn(3, 3, device=A_device)
            # solve and cholesky_solve goes through generic backend dispatch and hit kernel specific device check first
            # triangular_solve goes through specific backend dispatch (CPU/CUDA) and hit auto-generated device check first
            generic_backend_dispatch_err_str = "Expected b and A to be on the same device"
            specific_backend_dispatch_err_str = "Expected all tensors to be on the same device"
            with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
                torch.solve(b, A)
            with self.assertRaisesRegex(RuntimeError, generic_backend_dispatch_err_str):
                torch.cholesky_solve(b, A)
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.triangular_solve(b, A)
            # b and A have to be modified to match accepted inputs sizes for lu_solve
            b = b.unsqueeze(0)
            A = A.unsqueeze(0)
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=A_device).int())
            # This checks if a suitable error message is thrown
            # when LU output and pivots are not on the same device
            with self.assertRaisesRegex(RuntimeError, specific_backend_dispatch_err_str):
                torch.lu_solve(b, A, torch.rand(A.shape[:-1], device=b_device).int())
    @precisionOverride({torch.float32: 5e-3, torch.complex64: 1e-3})
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_pinverse(self, device, dtype):
        """Check torch.pinverse against the four Moore-Penrose conditions.

        Tested on square, fat, thin, and zero-numel matrices (batched and
        unbatched), plus the exact-inverse property for invertible inputs.
        """
        make_fullrank = make_fullrank_matrices_with_distinct_singular_values
        make_arg = partial(make_fullrank, device=device, dtype=dtype)
        def run_test(M):
            # Testing against definition for pseudo-inverses
            MPI = torch.pinverse(M)
            MPI_ = MPI.cpu().numpy()
            M_ = M.cpu().numpy()
            if M.numel() > 0:
                # Moore-Penrose conditions: M M+ M = M, M+ M M+ = M+,
                # and both products are Hermitian.
                self.assertEqual(M_, np.matmul(np.matmul(M_, MPI_), M_))
                self.assertEqual(MPI_, np.matmul(np.matmul(MPI_, M_), MPI_))
                self.assertEqual(np.matmul(M_, MPI_), np.matmul(M_, MPI_).swapaxes(-2, -1).conj())
                self.assertEqual(np.matmul(MPI_, M_), np.matmul(MPI_, M_).swapaxes(-2, -1).conj())
            else:
                # Zero-numel input: only the (transposed) shape can be checked.
                self.assertEqual(M.shape, MPI.shape[:-2] + (MPI.shape[-1], MPI.shape[-2]))
        for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5),  # square matrices
                      (3, 2), (5, 3, 2), (7, 5, 3, 2),  # fat matrices
                      (2, 3), (5, 2, 3), (7, 5, 2, 3),  # thin matrices
                      (0, 0), (0, 2), (2, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3)]:  # zero numel matrices
            M = torch.randn(*sizes, dtype=dtype, device=device)
            run_test(M)
        # Test inverse and pseudo-inverse for invertible matrix
        for sizes in [(5, 5), (3, 5, 5), (3, 7, 5, 5)]:
            matsize = sizes[-1]
            batchdims = sizes[:-2]
            M = make_arg(*batchdims, matsize, matsize)
            self.assertEqual(torch.eye(matsize, dtype=dtype, device=device).expand(sizes), M.pinverse().matmul(M),
                             atol=1e-7, rtol=0, msg='pseudo-inverse for invertible matrix')
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_non_negative(self, device, dtype):
def check(*size, noncontiguous=False):
t = make_tensor(size, device, dtype, noncontiguous=noncontiguous)
for n in range(8):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(1, 1)
check(5, 5)
check(5, 5, noncontiguous=True)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 4, 4, noncontiguous=True)
@skipCPUIfNoLapack
@skipCUDAIfNoMagmaAndNoCusolver
@dtypes(torch.double, torch.cdouble)
def test_matrix_power_negative(self, device, dtype):
make_fullrank = make_fullrank_matrices_with_distinct_singular_values
make_arg = partial(make_fullrank, device=device, dtype=dtype)
def check(*size):
t = make_arg(*size)
for n in range(-7, 0):
res = torch.linalg.matrix_power(t, n)
ref = np.linalg.matrix_power(t.cpu().numpy(), n)
self.assertEqual(res.cpu(), torch.from_numpy(ref))
check(0, 0)
check(5, 5)
check(2, 0, 0)
check(0, 3, 3)
check(2, 3, 3)
check(2, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.complex64)
    def test_linalg_matrix_exp_utils(self, device, dtype):
        """Check torch._compute_linear_combination (matrix_exp helper).

        Verifies it matches a broadcast-multiply-and-sum reference, and that
        the `out=` variant *accumulates into* the output rather than
        overwriting it.
        """
        # test linear combination
        def run_test(coeff_shape, data_shape):
            coeffs = torch.rand(*coeff_shape, device=device, dtype=torch.float)
            x = torch.rand(coeff_shape[1], *data_shape, device=device, dtype=dtype)
            res1 = torch._compute_linear_combination(x, coeffs)
            # Reference: broadcast coefficients over data dims, sum over inputs.
            res2 = (x.unsqueeze(0) * coeffs.view(*coeff_shape, *([1] * len(data_shape)))).sum(1)
            self.assertEqual(res1, res2, atol=1e-5, rtol=0.0)
            # check `out=` version
            res3 = torch.zeros(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            torch._compute_linear_combination(x, coeffs, out=res3)
            self.assertEqual(res1, res3, atol=1e-5, rtol=0.0)
            # `out=` accumulates: a ones-initialized output ends up at res1 + 1.
            res4 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            torch._compute_linear_combination(x, coeffs, out=res4)
            self.assertEqual(res1, res4 - 1.0, atol=1e-5, rtol=0.0)
            res5 = torch.ones(coeff_shape[0], *data_shape, device=device, dtype=dtype)
            res5_clone = res5.clone()
            torch._compute_linear_combination(x, coeffs, out=res5)
            self.assertEqual(res1, res5 - res5_clone, atol=1e-5, rtol=0.0)
        run_test([1, 3], [2, 2])
        run_test([3, 1], [2, 2])
        run_test([1, 10], [10, 10])
        run_test([10, 1], [10, 10])
        run_test([5, 3], [2, 2])
        run_test([5, 3], [100, 100])
        run_test([3, 4], [3, 3, 3])
        run_test([3, 4], [3, 3, 3, 3])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
def test_linalg_matrix_exp_boundary_cases(self, device, dtype):
expm = torch.linalg.matrix_exp
with self.assertRaisesRegex(RuntimeError, "Expected a floating point or complex tensor"):
expm(torch.randn(3, 3).type(torch.int))
with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
expm(torch.randn(3))
with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
expm(torch.randn(3, 2, 1))
# check 1x1 matrices
x = torch.randn(3, 3, 1, 1)
self.assertEqual(expm(x), x.exp())
    @slowTest
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_analytic(self, device, dtype):
        """Compare torch.linalg.matrix_exp against the analytic eigendecomposition.

        Builds diagonalizable inputs X = Q diag(d) Q^-1, so exp(X) is
        Q diag(exp(d)) Q^-1 exactly.  Inputs are rescaled to a range of
        operator norms chosen to hit each Pade-degree branch of the
        scaling-and-squaring implementation.
        """
        expm = torch.linalg.matrix_exp
        # check zero matrix
        x = torch.zeros(20, 20, dtype=dtype, device=device)
        self.assertTrue((expm(x) == torch.eye(20, 20, dtype=dtype, device=device)).all().item())
        def normalize_to_1_operator_norm(sample, desired_norm):
            # Rescale so the induced 1-norm (max abs column sum) equals desired_norm.
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def run_test(*n):
            # Norm thresholds at which matrix_exp switches Pade approximant degree.
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate input
            q = gen_good_cond_number_matrices(*n)
            q_ = q.cpu().numpy()
            qinv = torch.inverse(q)
            qinv_ = qinv.cpu().numpy()
            d = torch.randn(n[:-1], dtype=dtype, device=device)
            x = torch.from_numpy(
                np.matmul(q_, np.matmul(torch.diag_embed(d).cpu().numpy(), qinv_))).to(device)
            x_norm, _ = x.abs().sum(-2).max(-1)
            # test simple analytic whatever norm generated
            mexp = expm(x)
            mexp_analytic = np.matmul(
                q_,
                np.matmul(
                    torch.diag_embed(d.exp()).cpu().numpy(),
                    qinv_
                )
            )
            self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
            # generate norms to test different degree expansions
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            # matrices to equal norm
            for sample_norm in sample_norms:
                x_normalized = normalize_to_1_operator_norm(x, sample_norm)
                mexp = expm(x_normalized)
                # Rescaling x rescales its eigenvalues d by the same factor.
                mexp_analytic = np.matmul(
                    q_,
                    np.matmul(
                        torch.diag_embed((d / x_norm.unsqueeze(-1) * sample_norm).exp()).cpu().numpy(),
                        qinv_
                    )
                )
                self.assertEqual(mexp, mexp_analytic, atol=1e-3, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        run_test(100, 100)
        run_test(200, 200)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        run_test(3, 100, 100)
        run_test(3, 200, 200)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
        run_test(3, 3, 100, 100)
        run_test(3, 3, 200, 200)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float, torch.double)
def test_linalg_matrix_exp_batch(self, device, dtype):
def run_test(*n):
tensors_batch = torch.zeros(n, dtype=dtype, device=device)
tensors_batch = tensors_batch.view(-1, n[-2], n[-1])
num_matrices = tensors_batch.size(0)
tensors_list = []
for i in range(num_matrices):
tensors_list.append(torch.randn(n[-2], n[-1], dtype=dtype, device=device))
for i in range(num_matrices):
tensors_batch[i, ...] = tensors_list[i]
tensors_exp_map = (torch.linalg.matrix_exp(x) for x in tensors_list)
tensors_exp_batch = torch.linalg.matrix_exp(tensors_batch)
for i, tensor_exp in enumerate(tensors_exp_map):
self.assertEqual(tensors_exp_batch[i, ...], tensor_exp)
# small batch of matrices
run_test(3, 2, 2)
run_test(3, 3, 3)
run_test(3, 4, 4)
run_test(3, 5, 5)
# large batch of matrices
run_test(3, 3, 2, 2)
run_test(3, 3, 3, 3)
run_test(3, 3, 4, 4)
run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(torch.float, torch.double, torch.cfloat, torch.cdouble)
    def test_linalg_matrix_exp_compare_with_taylor(self, device, dtype):
        """Compare torch.linalg.matrix_exp with a NumPy Taylor-series reference.

        Inputs are rescaled to operator norms that exercise each Pade-degree
        branch; the reference uses a matching-degree Taylor expansion, with
        scaling-and-squaring for large-norm inputs.
        """
        def normalize_to_1_operator_norm(sample, desired_norm):
            # Rescale so the induced 1-norm (max abs column sum) equals desired_norm.
            sample_norm, _ = sample.abs().sum(-2).max(-1)
            sample_to_1_norm = sample / sample_norm.unsqueeze(-1).unsqueeze(-1)
            return sample_to_1_norm * desired_norm
        def gen_good_cond_number_matrices(*n):
            """
            Generates a diagonally-dominant matrix
            with the eigenvalues centered at 1
            and the radii at most (n[-1] - 1) / (n[-2] ** 2)
            """
            identity = torch.eye(n[-2], n[-1], dtype=dtype, device=device).expand(*n)
            x = torch.rand(*n, dtype=dtype, device=device) / (n[-1] ** 2)
            x = (x - x * identity) + identity
            return x
        def get_taylor_approximation(a, deg):
            # NumPy reference: I + a + a^2/2! + ... + a^deg/deg!
            a_ = a.cpu().numpy()
            identity = torch.eye(a.size(-2), a.size(-1), dtype=dtype, device=device).expand_as(a)
            res = identity.cpu().numpy()
            taylor_term = identity.cpu().numpy()
            for i in range(1, deg + 1):
                taylor_term = np.matmul(a_, taylor_term) / i
                res = res + taylor_term
            return res
        def scale_square(a, deg):
            # Scaling-and-squaring: exp(a) = exp(a / 2^s) ** (2^s).
            if a.abs().pow(2).sum().sqrt() < 1.0:
                return get_taylor_approximation(a, 12)
            else:
                s = int(torch.log2(a.abs().pow(2).sum().sqrt()).ceil().item())
                b = a / (2 ** s)
                b = get_taylor_approximation(b, 18)
                for _ in range(s):
                    b = np.matmul(b, b)
                return torch.from_numpy(b).to(a.device)
        def run_test(*n):
            degs = [1, 2, 4, 8, 12, 18]
            # Norm thresholds at which matrix_exp switches Pade approximant degree.
            if dtype == torch.float:
                thetas = [
                    1.192092800768788e-07,  # deg 1
                    5.978858893805233e-04,  # deg 2
                    5.116619363445086e-02,  # deg 4
                    5.800524627688768e-01,  # deg 8
                    1.461661507209034e+00,  # deg 12
                    3.010066362817634e+00   # deg 18
                ]
            else:  # if torch.double
                thetas = [
                    2.220446049250313e-16,  # deg 1
                    2.580956802971767e-08,  # deg 2
                    3.397168839976962e-04,  # deg 4
                    4.991228871115323e-02,  # deg 8
                    2.996158913811580e-01,  # deg 12
                    1.090863719290036e+00   # deg 18
                ]
            # generate norms to test different degree expansions
            sample_norms = []
            for i in range(len(thetas) - 1):
                sample_norms.append(0.5 * (thetas[i] + thetas[i + 1]))
            sample_norms = [thetas[0] / 2] + sample_norms + [thetas[-1] * 2]
            degs = [degs[0]] + degs
            for sample_norm, deg in zip(sample_norms, degs):
                x = gen_good_cond_number_matrices(*n)
                x = normalize_to_1_operator_norm(x, sample_norm)
                mexp = torch.linalg.matrix_exp(x)
                mexp_taylor = scale_square(x, deg)
                self.assertEqual(mexp, mexp_taylor, atol=1e-2, rtol=0.0)
        # single matrix
        run_test(2, 2)
        run_test(3, 3)
        run_test(4, 4)
        run_test(5, 5)
        # small batch of matrices
        run_test(3, 2, 2)
        run_test(3, 3, 3)
        run_test(3, 4, 4)
        run_test(3, 5, 5)
        # large batch of matrices
        run_test(3, 3, 2, 2)
        run_test(3, 3, 3, 3)
        run_test(3, 3, 4, 4)
        run_test(3, 3, 5, 5)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    @precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                        torch.float64: 1e-8, torch.complex128: 1e-8})
    def test_slogdet(self, device, dtype):
        """Compare torch.linalg.slogdet (sign and logabsdet) with NumPy.

        Inputs mix Hermitian, Hermitian PSD/PD, singular, and generic
        non-singular matrices across batch shapes, including empty batches;
        the `out=` variant is checked as well.
        """
        from torch.testing._internal.common_utils import (random_hermitian_matrix, random_hermitian_psd_matrix,
                                                          random_hermitian_pd_matrix, random_square_matrix_of_rank)
        # mat_chars denotes matrix characteristics
        # possible values are: hermitian, hermitian_psd, hermitian_pd, singular, non_singular
        def run_test(matsize, batchdims, mat_chars):
            num_matrices = np.prod(batchdims)
            list_of_matrices = []
            if num_matrices != 0:
                # Cycle through the requested matrix kinds across the batch.
                for idx in range(num_matrices):
                    mat_type = idx % len(mat_chars)
                    if mat_chars[mat_type] == 'hermitian':
                        list_of_matrices.append(random_hermitian_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_psd':
                        list_of_matrices.append(random_hermitian_psd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'hermitian_pd':
                        list_of_matrices.append(random_hermitian_pd_matrix(matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'singular':
                        list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
                    elif mat_chars[mat_type] == 'non_singular':
                        list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
                full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
            else:
                # Empty batch: kind does not matter, only the shape handling.
                full_tensor = torch.randn(*batchdims, matsize, matsize, dtype=dtype, device=device)
            actual_value = torch.linalg.slogdet(full_tensor)
            expected_value = np.linalg.slogdet(full_tensor.cpu().numpy())
            self.assertEqual(expected_value[0], actual_value[0], atol=self.precision, rtol=self.precision)
            self.assertEqual(expected_value[1], actual_value[1], atol=self.precision, rtol=self.precision)
            # test out=variant
            sign_out = torch.empty_like(actual_value[0])
            logabsdet_out = torch.empty_like(actual_value[1])
            ans = torch.linalg.slogdet(full_tensor, out=(sign_out, logabsdet_out))
            self.assertEqual(ans[0], sign_out)
            self.assertEqual(ans[1], logabsdet_out)
            self.assertEqual(sign_out, actual_value[0])
            self.assertEqual(logabsdet_out, actual_value[1])
        for matsize, batchdims in itertools.product([0, 3, 5], [(0,), (3,), (5, 3)]):
            run_test(matsize, batchdims, mat_chars=['hermitian_pd'])
            run_test(matsize, batchdims, mat_chars=['singular'])
            run_test(matsize, batchdims, mat_chars=['non_singular'])
            run_test(matsize, batchdims, mat_chars=['hermitian', 'hermitian_pd', 'hermitian_psd'])
            run_test(matsize, batchdims, mat_chars=['singular', 'non_singular'])
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    @dtypes(*floating_and_complex_types())
    def test_slogdet_errors_and_warnings(self, device, dtype):
        """Error and warning behavior of torch.linalg.slogdet.

        Covers: non-square input, <2-D input, unsupported dtype, the resize
        warning for wrongly-shaped `out=` tensors, dtype mismatch on `out=`,
        and (if CUDA is available) device mismatch on `out=`.
        """
        # slogdet requires the input to be a square matrix or batch of square matrices
        a = torch.randn(2, 3, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must be batches of square matrices'):
            torch.linalg.slogdet(a)
        # slogdet requires the input to be at least 2 dimensional tensor
        a = torch.randn(2, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, r'must have at least 2 dimensions'):
            torch.linalg.slogdet(a)
        # slogdet requires the input to be of float, double, cfloat or cdouble types
        a = torch.randn(2, 2, device=device, dtype=torch.bfloat16)
        with self.assertRaisesRegex(RuntimeError, r'of float, double, cfloat or cdouble types'):
            torch.linalg.slogdet(a)
        # if non-empty out tensor with wrong shape is passed a warning is given
        a = torch.randn(2, 3, 3, device=device, dtype=dtype)
        sign_out = torch.empty(1, device=device, dtype=dtype)
        # logabsdet is always real-valued, even for complex inputs.
        real_dtype = a.real.dtype if dtype.is_complex else dtype
        logabsdet_out = torch.empty(1, device=device, dtype=real_dtype)
        with warnings.catch_warnings(record=True) as w:
            # Trigger warning
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
            # Check warning occurs
            self.assertEqual(len(w), 1)
            self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
        # dtypes should be safely castable
        sign_out = torch.empty_like(a).to(torch.int)
        logabsdet_out = torch.empty_like(a).to(torch.int)
        with self.assertRaisesRegex(RuntimeError, "but got sign with dtype Int"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        sign_out = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "but got logabsdet with dtype Int"):
            torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
        # device should match
        if torch.cuda.is_available():
            wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
            sign_out = torch.empty(0, device=wrong_device, dtype=dtype)
            logabsdet_out = torch.empty(0, device=wrong_device, dtype=real_dtype)
            with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
                torch.linalg.slogdet(a, out=(sign_out, logabsdet_out))
@skipCUDAIf(torch.version.cuda is not None
            and torch.version.cuda.split(".") < ["11", "3"], "There's a bug in cuSOLVER < 11.3")
@skipCUDAIfNoMagmaAndNoCusolver
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet(self, device, dtype):
    """Cross-check torch det/logdet/slogdet (and torch.linalg.slogdet)
    against numpy.linalg.slogdet on identity, orthogonal, random,
    symmetric, singular and ill-conditioned matrices, and on elementary
    row/column operations whose effect on the determinant is known
    analytically."""
    def reference_slogdet(M):
        # NumPy reference returning (sign, log|det|) as tensors like M.
        sdet, logabsdet = np.linalg.slogdet(M.detach().cpu().numpy())
        return M.new_tensor(sdet), M.new_tensor(logabsdet)
    def test_single_det(M, target, desc):
        # Check all four det-style ops of M against target = (sign, log|det|);
        # `desc` labels the failure messages.
        target_sdet, target_logabsdet = target
        det = M.det()
        logdet = M.logdet()
        sdet, logabsdet = M.slogdet()
        linalg_sdet, linalg_logabsdet = torch.linalg.slogdet(M)
        # Test det
        self.assertEqual(det, target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (det)'.format(desc))
        # Test slogdet
        # Compare the overall value rather than individual parts because of
        # precision issues when det is near zero.
        self.assertEqual(sdet * logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (slogdet)'.format(desc))
        self.assertEqual(linalg_sdet * linalg_logabsdet.exp(), target_sdet * target_logabsdet.exp(),
                         atol=1e-6, rtol=0, msg='{} (linalg_slogdet)'.format(desc))
        # Test logdet
        # Compare logdet against our own pytorch slogdet because they should
        # be consistent, while it may behave slightly differently with other
        # slogdet implementations when det is near zero due to precision
        # issues.
        if sdet.item() < 0:
            # logdet of a negative-determinant matrix is NaN; NaN != NaN detects it
            self.assertTrue(logdet.item() != logdet.item(), '{} (logdet negative case)'.format(desc))
        else:
            self.assertEqual(logdet.exp(), target_logabsdet.exp(),
                             atol=1e-6, rtol=0, msg='{} (logdet non-negative case)'.format(desc))
    eye = torch.eye(5, dtype=dtype, device=device)
    test_single_det(eye, (torch.ones((), dtype=dtype, device=device), torch.zeros((), dtype=dtype, device=device)), 'identity')
    # Testing bug in #34061 (https://github.com/pytorch/pytorch/issues/34061)
    for n in range(250, 551, 100):
        mat = torch.randn(n, n, dtype=dtype, device=device)
        q, _ = torch.qr(mat)
        ref_det, ref_logabsdet = reference_slogdet(q)
        test_single_det(q, (ref_det, ref_logabsdet), 'orthogonal')
    def test(M):
        # Check M itself plus variants (inverse, scaled/duplicated/combined
        # rows and columns) whose determinants follow from det(M).
        assert M.size(0) >= 5, 'this helper fn assumes M to be at least 5x5'
        M = M.to(device)
        ref_M_sdet, ref_M_logabsdet = reference_slogdet(M)
        test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'basic')
        if ref_M_logabsdet.exp().item() >= 1e-6:  # skip singular
            M_inv = M.inverse()
            test_single_det(M_inv, reference_slogdet(M_inv), 'inverse')
        # NOTE(review): labeled 'transpose' but passes M unchanged — possibly
        # intended M.t(); confirm before relying on this case.
        test_single_det(M, (ref_M_sdet, ref_M_logabsdet), 'transpose')
        for x in [0, 2, 4]:
            for scale in [-2, -0.1, 0, 10]:
                # scaling one row/column multiplies the determinant by `scale`
                if scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(scale)
                elif scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-scale)
                # dim 0
                M_clone = M.clone()
                M_clone[:, x] *= scale
                test_single_det(M_clone, target, 'scale a row')
                # dim 1
                M_clone = M.clone()
                M_clone[x, :] *= scale
                test_single_det(M_clone, target, 'scale a column')
        for x1, x2 in [(0, 3), (4, 1), (3, 2)]:
            assert x1 != x2, 'x1 and x2 needs to be different for this test'
            # duplicated rows/columns make the matrix singular (det = 0)
            target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
            # dim 0
            M_clone = M.clone()
            M_clone[:, x2] = M_clone[:, x1]
            test_single_det(M_clone, target, 'two rows are same')
            # dim 1
            M_clone = M.clone()
            M_clone[x2, :] = M_clone[x1, :]
            test_single_det(M_clone, target, 'two columns are same')
            for scale1, scale2 in [(0.3, -1), (0, 2), (10, 0.1)]:
                # combined scale-and-swap of two rows/columns scales det by -scale1*scale2
                det_scale = scale1 * scale2 * -1
                if det_scale > 0:
                    target = ref_M_sdet, ref_M_logabsdet + math.log(det_scale)
                elif det_scale == 0:
                    target = torch.zeros_like(ref_M_sdet), torch.full_like(ref_M_logabsdet, -inf)
                else:
                    target = ref_M_sdet.neg(), ref_M_logabsdet + math.log(-det_scale)
                # dim 0
                M_clone = M.clone()
                t = M_clone[:, x1] * scale1
                M_clone[:, x1] += M_clone[:, x2] * scale2
                M_clone[:, x2] = t
                test_single_det(M_clone, target, 'exchanging rows')
                # dim 1
                M_clone = M.clone()
                t = M_clone[x1, :] * scale1
                M_clone[x1, :] += M_clone[x2, :] * scale2
                M_clone[x2, :] = t
                test_single_det(M_clone, target, 'exchanging columns')
    def get_random_mat_scale(n):
        # For matrices with values i.i.d. with 0 mean, unit variance, and
        # subexponential tail, we have:
        #   E[log det(A^2)] \approx log((n-1)!)
        #
        # Notice:
        #   log Var[det(A)] = log E[det(A^2)] >= E[log det(A^2)]
        #
        # So:
        #   stddev[det(A)] >= sqrt( (n-1)! )
        #
        # We use this as an intuitive guideline to scale random generated
        # matrices so our closeness tests can work more robustly:
        #   scale by sqrt( (n-1)! )^(-1/n) = ( (n-1)! )^(-1/(2n))
        #
        # source: https://arxiv.org/pdf/1112.0752.pdf
        # TODO: technically we need subexponential distn for this to hold,
        #       but we mostly use gaussian entries below. Consider switching
        #       to Chi-sq if this turns out not stable enough, since Chi-sq
        #       is easy enough to sample from.
        return math.factorial(n - 1) ** (-1.0 / (2 * n))
    for n in [5, 10, 25]:
        scale = get_random_mat_scale(n)
        test(torch.randn(n, n, dtype=dtype, device=device) * scale)
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        # symmetric psd
        test(r.mm(r.t()))
        # symmetric pd
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        test(r.mm(r.t()) + torch.eye(n, dtype=dtype, device=device) * 1e-6)
        # symmetric
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        for i in range(n):
            for j in range(i):
                r[i, j] = r[j, i]
        test(r)
        # non-contiguous
        test((torch.randn(n, n, n + 1, dtype=dtype, device=device) * scale)[:, 2, 1:])
        # det = 0
        r = torch.randn(n, n, dtype=dtype, device=device) * scale
        u, s, v = r.svd()
        if reference_slogdet(u)[0] < 0:
            u = -u
        if reference_slogdet(v)[0] < 0:
            v = -v
            s[0] *= -1
        s[-1] = 0
        test(u.mm(s.diag()).mm(v))
    # Small values to test numerical stability. Note that we don't scale
    # this matrix.
    r = torch.randn(512, 512, dtype=dtype, device=device)
    u, s, v = r.svd()
    s.fill_(1. / (100 * s.numel()))
    test(u.mm(s.diag()).mm(v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_det_logdet_slogdet_batched(self, device, dtype):
    """Batched det/logdet/slogdet must equal the per-matrix results
    obtained by applying the op to every matrix in the batch."""
    from torch.testing._internal.common_utils import (random_symmetric_matrix, random_symmetric_psd_matrix,
                                                      random_symmetric_pd_matrix, random_square_matrix_of_rank)
    # mat_chars denotes matrix characteristics
    # possible values are: sym, sym_psd, sym_pd, sing, non_sing
    def run_test(matsize, batchdims, mat_chars):
        # Build a batch that cycles through the requested matrix kinds.
        num_matrices = reduce(lambda x, y: x * y, batchdims, 1)
        list_of_matrices = []
        for idx in range(num_matrices):
            mat_type = idx % len(mat_chars)
            if mat_chars[mat_type] == 'sym':
                list_of_matrices.append(random_symmetric_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sym_psd':
                list_of_matrices.append(random_symmetric_psd_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sym_pd':
                list_of_matrices.append(random_symmetric_pd_matrix(matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'sing':
                # an all-ones matrix is singular for matsize > 1
                list_of_matrices.append(torch.ones(matsize, matsize, dtype=dtype, device=device))
            elif mat_chars[mat_type] == 'non_sing':
                list_of_matrices.append(random_square_matrix_of_rank(matsize, matsize, dtype=dtype, device=device))
        full_tensor = torch.stack(list_of_matrices, dim=0).reshape(batchdims + (matsize, matsize))
        # Scaling adapted from `get_random_mat_scale` in _test_det_logdet_slogdet
        full_tensor *= (math.factorial(matsize - 1) ** (-1.0 / (2 * matsize)))
        for fn in [torch.det, torch.logdet, torch.slogdet, torch.linalg.slogdet]:
            expected_value = []
            actual_value = fn(full_tensor)
            for full_idx in itertools.product(*map(lambda x: list(range(x)), batchdims)):
                expected_value.append(fn(full_tensor[full_idx]))
            if fn == torch.slogdet or fn == torch.linalg.slogdet:
                # slogdet returns a (sign, logabsdet) pair; compare both parts
                sign_value = torch.stack([tup[0] for tup in expected_value], dim=0).reshape(batchdims)
                expected_value = torch.stack([tup[1] for tup in expected_value], dim=0).reshape(batchdims)
                self.assertEqual(sign_value, actual_value[0])
                self.assertEqual(expected_value, actual_value[1])
            else:
                expected_value = torch.stack(expected_value, dim=0).reshape(batchdims)
                self.assertEqual(actual_value, expected_value)
    for matsize, batchdims in itertools.product([3, 5], [(3,), (5, 3)]):
        run_test(matsize, batchdims, mat_chars=['sym_pd'])
        run_test(matsize, batchdims, mat_chars=['sing'])
        run_test(matsize, batchdims, mat_chars=['non_sing'])
        run_test(matsize, batchdims, mat_chars=['sym', 'sym_pd', 'sym_psd'])
        run_test(matsize, batchdims, mat_chars=['sing', 'non_sing'])
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_inverse(self, device, dtype):
    """cholesky_inverse(L) must match torch.inverse(A) where L is the
    Cholesky factor of A, across shapes, batch dims, upper/lower factors
    and (non-)contiguous inputs; also exercises both code paths of the
    out= variant."""
    from torch.testing._internal.common_utils import random_hermitian_pd_matrix
    def check_case(n, batch_dims, upper, contiguous):
        A = random_hermitian_pd_matrix(n, *batch_dims, dtype=dtype, device=device)
        if A.numel() > 0 and not contiguous:
            # transpose view makes the input non-contiguous
            A = A.mT
            self.assertFalse(A.is_contiguous())
        chol = torch.linalg.cholesky(A)
        reference = torch.inverse(A)
        factor = chol.mH if upper else chol
        self.assertEqual(torch.cholesky_inverse(factor, upper), reference)
    all_cases = itertools.product((0, 3, 5), ((), (0,), (3, ), (2, 2)), (True, False), (True, False))
    for n, batch_dims, upper, contiguous in all_cases:
        check_case(n, batch_dims, upper, contiguous)
    # check the out= variant
    A = random_hermitian_pd_matrix(3, 2, dtype=dtype, device=device)
    chol = torch.linalg.cholesky(A)
    # There are two code paths currently for the out= variant
    # 1. When 'out' tensor is in Fortran (column-major) memory format
    #    then the fast route is taken and the storage is reused directly in the computations
    # 2. When 'out' tensor is not in Fortran format then a temporary tensor is allocated internally
    #    and the result is copied from the temporary tensor to 'out' tensor
    # First code path: build a column-major out tensor.
    out_t = torch.empty_like(A).mT.clone(memory_format=torch.contiguous_format)
    out = out_t.mT
    ans = torch.cholesky_inverse(chol, out=out)
    self.assertEqual(ans, out)
    self.assertEqual(torch.inverse(A), out)
    # Second code path: a plain contiguous out tensor.
    out = torch.empty_like(A)
    ans = torch.cholesky_inverse(chol, out=out)
    self.assertEqual(ans, out)
    self.assertEqual(torch.inverse(A), out)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_cholesky_inverse_errors_and_warnings(self, device, dtype):
    """cholesky_inverse must validate input rank and squareness, reject
    mismatched out dtype/device, warn when resizing a non-empty out
    tensor, and handle a zero diagonal element (error on CPU, inf/nan
    result on CUDA)."""
    # cholesky_inverse requires the input to be at least 2 dimensional tensor
    a = torch.randn(2, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must have at least 2 dimensions"):
        torch.cholesky_inverse(a)
    # cholesky_inverse requires a square matrix
    a = torch.randn(2, 3, device=device, dtype=dtype)
    with self.assertRaisesRegex(RuntimeError, "must be batches of square matrices"):
        torch.cholesky_inverse(a)
    # if non-empty out tensor with wrong shape is passed a warning is given
    a = torch.randn(3, 3, device=device, dtype=dtype)
    out = torch.empty(2, 3, device=device, dtype=dtype)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.cholesky_inverse(a, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    # dtypes should be safely castable
    out = torch.empty(*a.shape, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.cholesky_inverse(a, out=out)
    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            torch.cholesky_inverse(a, out=out)
    # cholesky_inverse raises an error for invalid inputs on CPU
    # for example if at least one diagonal element is zero
    a = torch.randn(3, 3, device=device, dtype=dtype)
    a[1, 1] = 0
    if self.device_type == 'cpu':
        with self.assertRaisesRegex(torch.linalg.LinAlgError, r"cholesky_inverse: The diagonal element 2 is zero"):
            torch.cholesky_inverse(a)
    # cholesky_inverse on GPU does not raise an error for this case
    elif self.device_type == 'cuda':
        out = torch.cholesky_inverse(a)
        self.assertTrue(out.isinf().any() or out.isnan().any())
def _select_broadcastable_dims(self, dims_full=None):
# select full dimensionality
if dims_full is None:
dims_full = []
ndims = random.randint(1, 4)
dims_full = [random.randint(1, 8) for _ in range(ndims)]
else:
ndims = len(dims_full)
# select actual dimensions for ops:
# larger: full ndims, individual sizes may be reduced
# smaller: possibly reduced ndims, sizes may be reduced
smaller_ndims = random.randint(1, ndims)
dims_small = []
dims_large = []
for i in range(ndims - 1, -1, -1):
j = random.randint(1, 3)
if j == 1: # no reduced singleton dimension
ds = dims_full[i]
dl = dims_full[i]
elif j == 2: # larger may have reduced singleton dimension
ds = dims_full[i]
dl = 1 if len(dims_small) < smaller_ndims else dims_full[i]
elif j == 3: # smaller may have reduced singleton dimension
ds = 1
dl = dims_full[i]
dims_large = [dl] + dims_large
if len(dims_small) < smaller_ndims:
dims_small = [ds] + dims_small
return (dims_small, dims_large, dims_full)
def test_broadcast_fused_matmul(self, device):
    """Fused matmul ops must broadcast their `self` argument: calling with
    a broadcastable (smaller) tensor must give the same result as calling
    with the tensor explicitly expanded to the full shape."""
    fns = ["baddbmm", "addbmm", "addmm", "addmv", "addr"]
    for fn in fns:
        batch_dim = random.randint(1, 8)
        n_dim = random.randint(1, 8)
        m_dim = random.randint(1, 8)
        p_dim = random.randint(1, 8)
        def dims_full_for_fn():
            # Full (unbroadcast) shapes of (self, mat1, mat2) for each op.
            if fn == "baddbmm":
                return ([batch_dim, n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
            elif fn == "addbmm":
                return ([n_dim, p_dim], [batch_dim, n_dim, m_dim], [batch_dim, m_dim, p_dim])
            elif fn == "addmm":
                return ([n_dim, p_dim], [n_dim, m_dim], [m_dim, p_dim])
            elif fn == "addmv":
                return ([n_dim], [n_dim, m_dim], [m_dim])
            elif fn == "addr":
                return ([n_dim, m_dim], [n_dim], [m_dim])
            else:
                raise AssertionError("unknown function")
        (t0_dims_full, t1_dims, t2_dims) = dims_full_for_fn()
        (t0_dims_small, _, _) = self._select_broadcastable_dims(t0_dims_full)
        t0_small = torch.randn(*t0_dims_small, device=device).float()
        t1 = torch.randn(*t1_dims, device=device).float()
        t2 = torch.randn(*t2_dims, device=device).float()
        t0_full = t0_small.expand(*t0_dims_full).to(device)
        fntorch = getattr(torch, fn)
        r0 = fntorch(t0_small, t1, t2)
        r1 = fntorch(t0_full, t1, t2)
        # broadcast result must equal the explicitly-expanded result
        self.assertEqual(r0, r1)
@tf32_on_and_off(0.001)
def test_broadcast_batched_matmul(self, device):
    """torch.matmul must broadcast batch dimensions (including 1-D
    operands) consistently with explicitly expanded inputs and with
    torch.bmm on the flattened batch."""
    n_dim = random.randint(1, 8)
    m_dim = random.randint(1, 8)
    p_dim = random.randint(1, 8)
    full_batch_dims = [random.randint(1, 3) for i in range(random.randint(1, 3))]
    (batch_dims_small, _, _) = self._select_broadcastable_dims(full_batch_dims)
    def verify_batched_matmul(full_lhs, one_dimensional):
        # full_lhs: whether the fully-batched operand is on the left.
        # one_dimensional: whether the non-batched operand is a vector.
        if not one_dimensional:
            lhs_dims = [n_dim, m_dim]
            rhs_dims = [m_dim, p_dim]
            result_dims = [n_dim, p_dim]
        else:
            lhs_dims = [n_dim, m_dim] if full_lhs else [m_dim]
            rhs_dims = [m_dim, p_dim] if not full_lhs else [m_dim]
            result_dims = [n_dim] if full_lhs else [p_dim]
        # 1-D operands are treated as 1 x m (lhs) or m x 1 (rhs) matrices
        lhs_mat_dims = lhs_dims if len(lhs_dims) != 1 else [1, m_dim]
        rhs_mat_dims = rhs_dims if len(rhs_dims) != 1 else [m_dim, 1]
        full_mat_dims = lhs_mat_dims if full_lhs else rhs_mat_dims
        dim0_dims = rhs_dims if full_lhs else lhs_dims
        small_dims = batch_dims_small + (rhs_mat_dims if full_lhs else lhs_mat_dims)
        small = torch.randn(*(small_dims), device=device).float()
        dim0 = torch.randn(*(dim0_dims), device=device).float()
        full = torch.randn(*(full_batch_dims + full_mat_dims), device=device).float()
        if not one_dimensional:
            (lhsTensors, rhsTensors) = ((full,), (small, dim0)) if full_lhs else ((small, dim0), (full,))
        else:
            (lhsTensors, rhsTensors) = ((full,), (dim0,)) if full_lhs else ((dim0,), (full,))
        def maybe_squeeze_result(l, r, result):
            # squeeze the dummy matrix dim introduced for a 1-D operand
            if len(lhs_dims) == 1 and l.dim() != 1:
                return result.squeeze(-2)
            elif len(rhs_dims) == 1 and r.dim() != 1:
                return result.squeeze(-1)
            else:
                return result
        for lhs in lhsTensors:
            lhs_expanded = lhs.expand(*(torch.Size(full_batch_dims) + torch.Size(lhs_mat_dims)))
            lhs_expanded_matmul_fn = lhs_expanded.matmul
            for rhs in rhsTensors:
                rhs_expanded = ((rhs if len(rhs_dims) != 1 else rhs.unsqueeze(-1)).
                                expand(*(torch.Size(full_batch_dims) + torch.Size(rhs_mat_dims))))
                truth = maybe_squeeze_result(lhs_expanded, rhs_expanded, lhs_expanded_matmul_fn(rhs_expanded))
                for l in (lhs, lhs_expanded):
                    for r in (rhs, rhs_expanded):
                        l_matmul_fn = l.matmul
                        result = maybe_squeeze_result(l, r, l_matmul_fn(r))
                        self.assertEqual(truth, result)
                        # test torch.matmul function as well
                        torch_result = maybe_squeeze_result(l, r, torch.matmul(l, r))
                        self.assertEqual(truth, torch_result)
                        # test torch.matmul with out
                        out = torch.zeros_like(torch_result)
                        torch.matmul(l, r, out=out)
                        self.assertEqual(truth, maybe_squeeze_result(l, r, out))
                # compare to bmm
                bmm_result = (torch.bmm(lhs_expanded.contiguous().view(-1, *lhs_mat_dims),
                                        rhs_expanded.contiguous().view(-1, *rhs_mat_dims)))
                self.assertEqual(truth.view(-1, *result_dims), bmm_result.view(-1, *result_dims))
    for indices in itertools.product((True, False), repeat=2):
        verify_batched_matmul(*indices)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_non_contiguous(self, device, dtype):
    """lu_solve must accept non-contiguous (permuted) A and b and agree
    with numpy.linalg.solve on the same operands."""
    make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                     device=device, dtype=dtype)
    A_base = make_A(2, 2, 2)
    b_base = torch.randn(2, 2, 2, dtype=dtype, device=device)
    # Reference solution computed on the permuted views.
    x_exp = np.linalg.solve(A_base.cpu().permute(0, 2, 1).numpy(),
                            b_base.cpu().permute(2, 1, 0).numpy())
    A = A_base.permute(0, 2, 1)
    b = b_base.permute(2, 1, 0)
    assert not A.is_contiguous() and not b.is_contiguous(), "contiguous inputs"
    LU_data, LU_pivots = torch.lu(A)
    self.assertEqual(torch.lu_solve(b, LU_data, LU_pivots), x_exp)
def lu_solve_test_helper(self, A_dims, b_dims, pivot, device, dtype):
    """Build a random full-rank ``A`` and right-hand side ``b``, LU-factorize
    ``A`` and assert the factorization succeeded (all info codes zero).

    Returns ``(b, A, LU_data, LU_pivots)`` for use by the lu_solve tests.
    """
    make_A = partial(make_fullrank_matrices_with_distinct_singular_values,
                     device=device, dtype=dtype)
    b = torch.randn(*b_dims, dtype=dtype, device=device)
    A = make_A(*A_dims)
    LU_data, LU_pivots, info = torch.lu(A, get_infos=True, pivot=pivot)
    # a successful factorization reports a zero info code per batch entry
    self.assertEqual(info, torch.zeros_like(info))
    return b, A, LU_data, LU_pivots
@skipCPUIfNoLapack
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve(self, device, dtype):
    """Solve ``A x = b`` through an LU factorization and verify that
    ``A @ x`` reproduces ``b`` for several square sizes."""
    def sub_test(pivot):
        for k, n in ((2, 3), (3, 5), (5, 7)):
            b, A, LU_data, LU_pivots = self.lu_solve_test_helper((n, n), (n, k), pivot, device, dtype)
            x = torch.lu_solve(b, LU_data, LU_pivots)
            self.assertEqual(b, np.matmul(A.cpu(), x.cpu()))
    sub_test(True)
    # the non-pivoting path is exercised on CUDA only
    if self.device_type == 'cuda':
        sub_test(False)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
@precisionOverride({torch.float32: 1e-3, torch.complex64: 1e-3,
                    torch.float64: 1e-8, torch.complex128: 1e-8})
def test_lu_solve_batched(self, device, dtype):
    """Batched lu_solve must match per-matrix lu_solve results and satisfy
    A @ x == b; also covers zero-sized batched inputs."""
    def sub_test(pivot):
        def lu_solve_batch_test_helper(A_dims, b_dims, pivot):
            b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, pivot, device, dtype)
            x_exp_list = []
            for i in range(b_dims[0]):
                x_exp_list.append(torch.lu_solve(b[i], LU_data[i], LU_pivots[i]))
            x_exp = torch.stack(x_exp_list)  # Stacked output
            x_act = torch.lu_solve(b, LU_data, LU_pivots)  # Actual output
            self.assertEqual(x_exp, x_act)  # Equality check
            Ax = np.matmul(A.cpu(), x_act.cpu())
            self.assertEqual(b, Ax)
        for batchsize in [1, 3, 4]:
            lu_solve_batch_test_helper((batchsize, 5, 5), (batchsize, 5, 10), pivot)
        # Tests tensors with 0 elements
        b = torch.randn(3, 0, 3, dtype=dtype, device=device)
        A = torch.randn(3, 0, 0, dtype=dtype, device=device)
        LU_data, LU_pivots = torch.lu(A)
        self.assertEqual(torch.empty_like(b), b.lu_solve(LU_data, LU_pivots))
    sub_test(True)
    # the non-pivoting path is exercised on CUDA only
    if self.device_type == 'cuda':
        sub_test(False)
@slowTest
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_many_batches(self, device, dtype):
    """Stress lu_solve with very large batch counts and verify that
    A @ x reproduces (the broadcast of) b."""
    def check(A_dims, b_dims):
        b, A, LU_data, LU_pivots = self.lu_solve_test_helper(A_dims, b_dims, True, device, dtype)
        solution = torch.lu_solve(b, LU_data, LU_pivots)
        reconstructed = torch.matmul(A, solution)
        self.assertEqual(reconstructed, b.expand_as(reconstructed))
    check((65536, 5, 5), (65536, 5, 10))
    check((262144, 5, 5), (262144, 5, 10))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_lu_solve_batched_broadcasting(self, device, dtype):
    """lu_solve must broadcast the batch dimensions of A and b the same
    way numpy.linalg.solve does."""
    make_fullrank = make_fullrank_matrices_with_distinct_singular_values
    make_A = partial(make_fullrank, device=device, dtype=dtype)
    def run_test(A_dims, b_dims, pivot=True):
        A_matrix_size = A_dims[-1]
        A_batch_dims = A_dims[:-2]
        A = make_A(*A_batch_dims, A_matrix_size, A_matrix_size)
        b = make_tensor(b_dims, dtype=dtype, device=device)
        x_exp = np.linalg.solve(A.cpu(), b.cpu())
        LU_data, LU_pivots = torch.lu(A, pivot=pivot)
        x = torch.lu_solve(b, LU_data, LU_pivots)
        self.assertEqual(x, x_exp)
    # test against numpy.linalg.solve
    run_test((2, 1, 3, 4, 4), (2, 1, 3, 4, 6))  # no broadcasting
    run_test((2, 1, 3, 4, 4), (4, 6))  # broadcasting b
    run_test((4, 4), (2, 1, 3, 4, 2))  # broadcasting A
    run_test((1, 3, 1, 4, 4), (2, 1, 3, 4, 5))  # broadcasting A & b
@onlyCUDA
@skipCUDAIfNoMagma
@dtypes(*floating_and_complex_types())
# this tests https://github.com/pytorch/pytorch/issues/36921
def test_lu_solve_large_matrices(self, device, dtype):
    """Regression test for gh-36921: lu_solve with more than 1024
    right-hand sides must still satisfy A @ x == b."""
    b, A, LU_data, LU_pivots = self.lu_solve_test_helper((1, 1), (1, 1, 1025), True, device, dtype)
    x = torch.lu_solve(b, LU_data, LU_pivots)
    Ax = torch.matmul(A, x)
    self.assertEqual(Ax, b.expand_as(Ax))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_lu_solve_out_errors_and_warnings(self, device, dtype):
    """lu_solve's out= variant must reject mismatched dtypes/devices and
    warn when it has to resize a non-empty out tensor."""
    # dtypes should be safely castable
    a = torch.eye(2, dtype=dtype, device=device)
    LU_data, LU_pivots = torch.lu(a, pivot=True)
    b = torch.randn(2, 1, dtype=dtype, device=device)
    out = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got result with dtype Int"):
        torch.lu_solve(b, LU_data, LU_pivots, out=out)
    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out = torch.empty(0, dtype=dtype, device=wrong_device)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.lu_solve(b, LU_data, LU_pivots, out=out)
    # if out tensor with wrong shape is passed a warning is given
    with warnings.catch_warnings(record=True) as w:
        out = torch.empty(1, dtype=dtype, device=device)
        # Trigger warning
        torch.lu_solve(b, LU_data, LU_pivots, out=out)
        # Check warning occurs
        self.assertEqual(len(w), 1)
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
@precisionOverride({torch.float32: 1e-5, torch.complex64: 1e-5})
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_symeig(self, device, dtype):
    """torch.symeig must reconstruct the input from its eigendecomposition
    (V @ diag(e) @ V^H), agree between the out= and functional variants,
    and accept non-contiguous inputs."""
    from torch.testing._internal.common_utils import random_hermitian_matrix
    def run_test(dims, eigenvectors, upper):
        # dims = (matrix_size,) + batch_dims (see the call site below)
        x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
        if dtype.is_complex:
            real_dtype = torch.float32 if dtype is torch.complex64 else torch.float64
        else:
            real_dtype = dtype
        # eigenvalues of a Hermitian matrix are real, hence real_dtype
        oute = torch.empty(dims[1:] + dims[:1], dtype=real_dtype, device=device)
        outv = torch.empty(dims[1:] + dims[:1] * 2, dtype=dtype, device=device)
        torch.symeig(x, eigenvectors=eigenvectors, upper=upper, out=(oute, outv))
        if eigenvectors:
            outv_ = outv.cpu().numpy()
            x_recon = np.matmul(np.matmul(outv_, torch.diag_embed(oute.to(dtype)).cpu().numpy()),
                                outv_.swapaxes(-2, -1).conj())
            self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
        else:
            eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
            self.assertEqual(eigvals, oute, msg='Eigenvalues mismatch')
            self.assertEqual(torch.empty(0, device=device, dtype=dtype), outv, msg='Eigenvector matrix not empty')
        rese, resv = x.symeig(eigenvectors=eigenvectors, upper=upper)
        self.assertEqual(rese, oute, msg="outputs of symeig and symeig with out don't match")
        self.assertEqual(resv, outv, msg="outputs of symeig and symeig with out don't match")
        # test non-contiguous
        x = random_hermitian_matrix(*dims, dtype=dtype, device=device)
        n_dim = len(dims) + 1
        # Reverse the batch dimensions and the matrix dimensions and then concat them
        x = x.permute(tuple(range(n_dim - 3, -1, -1)) + (n_dim - 1, n_dim - 2))
        assert not x.is_contiguous(), "x is intentionally non-contiguous"
        rese, resv = torch.symeig(x, eigenvectors=eigenvectors, upper=upper)
        if eigenvectors:
            resv_ = resv.cpu().numpy()
            x_recon = np.matmul(np.matmul(resv_, torch.diag_embed(rese.to(dtype)).cpu().numpy()),
                                resv_.swapaxes(-2, -1).conj())
            self.assertEqual(x, x_recon, atol=1e-8, rtol=0, msg='Incorrect reconstruction using V @ diag(e) @ V.T')
        else:
            eigvals, _ = torch.symeig(x, eigenvectors=True, upper=upper)
            self.assertEqual(eigvals, rese, msg='Eigenvalues mismatch')
            self.assertEqual(torch.empty(0, device=device, dtype=dtype), resv, msg='Eigenvector matrix not empty')
    batch_dims_set = [(), (3,), (3, 5), (5, 3, 5)]
    for batch_dims, eigenvectors, upper in itertools.product(batch_dims_set, (True, False), (True, False)):
        run_test((5,) + batch_dims, eigenvectors, upper)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_symeig_out_errors_and_warnings(self, device, dtype):
    """symeig's out= variant must warn when resizing non-empty outputs and
    reject mismatched dtypes/devices for either output tensor."""
    from torch.testing._internal.common_utils import random_hermitian_matrix
    # if non-empty out tensor with wrong shape is passed a warning is given
    a = random_hermitian_matrix(3, dtype=dtype, device=device)
    real_dtype = a.real.dtype if dtype.is_complex else dtype
    out_w = torch.empty(7, 7, dtype=real_dtype, device=device)
    out_v = torch.empty(7, 7, dtype=dtype, device=device)
    with warnings.catch_warnings(record=True) as w:
        # Trigger warning
        torch.symeig(a, out=(out_w, out_v))
        # both outputs get resized, so the two most recent warnings are checked
        self.assertTrue("An output with one or more elements was resized" in str(w[-2].message))
        self.assertTrue("An output with one or more elements was resized" in str(w[-1].message))
    # dtypes should be safely castable
    out_w = torch.empty(0, dtype=real_dtype, device=device)
    out_v = torch.empty(0, dtype=torch.int, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvectors with dtype Int"):
        torch.symeig(a, out=(out_w, out_v))
    out_w = torch.empty(0, dtype=torch.int, device=device)
    out_v = torch.empty(0, dtype=dtype, device=device)
    with self.assertRaisesRegex(RuntimeError, "but got eigenvalues with dtype Int"):
        torch.symeig(a, out=(out_w, out_v))
    # device should match
    if torch.cuda.is_available():
        wrong_device = 'cpu' if self.device_type != 'cpu' else 'cuda'
        out_w = torch.empty(0, device=wrong_device, dtype=dtype)
        out_v = torch.empty(0, device=device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.symeig(a, out=(out_w, out_v))
        out_w = torch.empty(0, device=device, dtype=dtype)
        out_v = torch.empty(0, device=wrong_device, dtype=dtype)
        with self.assertRaisesRegex(RuntimeError, "tensors to be on the same device"):
            torch.symeig(a, out=(out_w, out_v))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
def test_pca_lowrank(self, device):
    """Validate torch.pca_lowrank on dense low-rank and sparse inputs.

    Checks output shapes, that U @ diag(S) @ V^H reconstructs the
    column-centered input, that the detected numerical rank matches the
    constructed rank (dense inputs only), that leading singular values
    agree with torch.linalg.svdvals, and that the function remains
    scriptable via torch.jit.script.

    Fix over the previous version: the loop target that iterated over the
    batch-shape lists was also named ``all_batches``, shadowing the list
    defined just above it; it is renamed to ``batch_choices``.
    """
    from torch.testing._internal.common_utils import random_lowrank_matrix, random_sparse_matrix
    dtype = torch.double
    def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **options):
        # density < 1 requests a sparse input (actual_rank is unknown then)
        density = options.pop('density', 1)
        if isinstance(matrix_size, int):
            rows = columns = matrix_size
        else:
            rows, columns = matrix_size
        if density == 1:
            a_input = random_lowrank_matrix(actual_rank, rows, columns, *batches, device=device, dtype=dtype)
            a = a_input
        else:
            a_input = random_sparse_matrix(rows, columns, density, device=device, dtype=dtype)
            a = a_input.to_dense()
        u, s, v = pca(a_input, q=guess_rank, **options)
        self.assertEqual(s.shape[-1], guess_rank)
        self.assertEqual(u.shape[-2], rows)
        self.assertEqual(u.shape[-1], guess_rank)
        self.assertEqual(v.shape[-1], guess_rank)
        self.assertEqual(v.shape[-2], columns)
        A1 = u.matmul(s.diag_embed()).matmul(v.mT)
        ones_m1 = torch.ones(batches + (rows, 1), dtype=a.dtype, device=device)
        c = a.sum(axis=-2) / rows
        c = c.reshape(batches + (1, columns))
        # PCA operates on the column-centered matrix: compare against a - 1 @ mean
        A2 = a - ones_m1.matmul(c)
        self.assertEqual(A1, A2)
        if density == 1:
            # actual rank is known only for dense input
            detect_rank = (s.abs() > 1e-5).sum(axis=-1)
            self.assertEqual(actual_rank * torch.ones(batches, device=device, dtype=torch.int64), detect_rank)
            S = torch.linalg.svdvals(A2)
            self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])
    all_batches = [(), (1,), (3,), (2, 3)]
    for actual_rank, size, batch_choices in [
            (2, (17, 4), all_batches),
            (2, (100, 4), all_batches),
            (6, (100, 40), all_batches),
            (12, (1000, 1000), [()]),
    ]:
        for batches in batch_choices:
            for guess_rank in [
                    actual_rank,
                    actual_rank + 2,
                    actual_rank + 6,
            ]:
                if guess_rank <= min(*size):
                    run_subtest(guess_rank, actual_rank, size, batches, device, torch.pca_lowrank)
                    run_subtest(guess_rank, actual_rank, size[::-1], batches, device, torch.pca_lowrank)
    # sparse input
    for guess_rank, size in [
            (4, (17, 4)), (4, (4, 17)), (16, (17, 17)),
            (21, (100, 40)), (20, (40, 100)), (600, (1000, 1000))]:
        for density in [0.005, 0.1]:
            run_subtest(guess_rank, None, size, (), device, torch.pca_lowrank, density=density)
    # jitting support
    jitted = torch.jit.script(torch.pca_lowrank)
    guess_rank, actual_rank, size, batches = 2, 2, (17, 4), ()
    run_subtest(guess_rank, actual_rank, size, batches, device, jitted)
# Ensure that nuclear_norm's out variant gives the same result as the non-out
@onlyNativeDeviceTypes
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.float32, torch.float64)
def test_nuclear_norm_out(self, device, dtype):
    """torch.nuclear_norm(..., out=) must match the functional variant for
    several input sizes, dim pairs, and keepdim settings."""
    test_cases = [
        # input size, dim
        ((25, 25), None),
        ((25, 25), (0, 1)),
        ((25, 25), (1, 0)),
        ((25, 25, 25), (2, 0)),
        ((25, 25, 25), (0, 1)),
    ]
    for keepdim, (input_size, dim) in itertools.product((False, True), test_cases):
        msg = f'input_size: {input_size}, dim: {dim}, keepdim: {keepdim}'
        x = torch.randn(*input_size, device=device, dtype=dtype)
        result_out = torch.empty(0, device=device, dtype=dtype)
        if dim is None:
            result = torch.nuclear_norm(x, keepdim=keepdim)
            torch.nuclear_norm(x, keepdim=keepdim, out=result_out)
        else:
            result = torch.nuclear_norm(x, keepdim=keepdim, dim=dim)
            torch.nuclear_norm(x, keepdim=keepdim, dim=dim, out=result_out)
        self.assertEqual(result, result_out, msg=msg)
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(*floating_and_complex_types())
def test_geqrf(self, device, dtype):
    """torch.geqrf must match numpy.linalg.qr(mode='raw') across square,
    tall, wide, empty and batched inputs."""
    def run_test(shape):
        # numpy.linalg.qr with mode = 'raw' computes the same operation as torch.geqrf
        # so this test compares against that function
        A = make_tensor(shape, dtype=dtype, device=device)
        # numpy.linalg.qr doesn't work with batched input
        m, n = A.shape[-2:]
        tau_size = "n" if m > n else "m"
        np_dtype = A.cpu().numpy().dtype
        ot = [np_dtype, np_dtype]
        # vectorize the per-matrix numpy call over leading batch dims
        numpy_geqrf_batched = np.vectorize(
            lambda x: np.linalg.qr(x, mode='raw'),
            otypes=ot,
            signature=f'(m,n)->(n,m),({tau_size})')
        expected = numpy_geqrf_batched(A.cpu())
        actual = torch.geqrf(A)
        # numpy.linalg.qr returns transposed result
        self.assertEqual(expected[0].swapaxes(-2, -1), actual[0])
        self.assertEqual(expected[1], actual[1])
    batches = [(), (0, ), (2, ), (2, 1)]
    ns = [5, 2, 0]
    for batch, (m, n) in product(batches, product(ns, ns)):
        run_test((*batch, m, n))
@skipCUDAIfNoMagma
@skipCPUIfNoLapack
@dtypes(torch.double)
def test_lstsq(self, device, dtype):
    """Exercise torch.lstsq on under- and over-determined systems,
    including the out= variant and in-place reuse of the inputs as
    outputs.

    Fixes over the previous version:
      * ``check_norm`` now uses its ``expected_norm`` parameter instead of
        accidentally closing over the outer ``expectedNorm`` variable.
      * the residual check after the out= call in the underdetermined path
        now verifies ``res2`` (it previously re-checked ``res1``).
    """
    def _test_underdetermined(a, b, expectedNorm):
        # underdetermined systems are only supported on CPU
        if self.device_type != 'cpu':
            return
        m = a.size()[0]
        n = a.size()[1]
        assert m <= n
        a_copy = a.clone()
        b_copy = b.clone()
        res1 = torch.lstsq(b, a)[0]
        # inputs must not be modified by the functional variant
        self.assertEqual(a, a_copy, atol=0, rtol=0)
        self.assertEqual(b, b_copy, atol=0, rtol=0)
        self.assertEqual((torch.mm(a, res1) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
        ta = torch.tensor((), dtype=dtype, device=device)
        tb = torch.tensor((), dtype=dtype, device=device)
        res2 = torch.lstsq(b, a, out=(tb, ta))[0]
        self.assertEqual(a, a_copy, atol=0, rtol=0)
        self.assertEqual(b, b_copy, atol=0, rtol=0)
        # verify the out= result (previously this re-checked res1)
        self.assertEqual((torch.mm(a, res2) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
        # out= may alias the inputs themselves
        res3 = torch.lstsq(b, a, out=(b, a))[0]
        self.assertEqual((torch.mm(a_copy, b) - b_copy).norm(), expectedNorm, atol=1e-8, rtol=0)
        # all variants must agree exactly
        self.assertEqual(res1, tb, atol=0, rtol=0)
        self.assertEqual(res1, b, atol=0, rtol=0)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        self.assertEqual(res1, res3, atol=0, rtol=0)
    def _test_overdetermined(a, b, expectedNorm):
        m = a.size()[0]
        n = a.size()[1]
        assert m > n
        def check_norm(a, b, expected_norm, gels_result):
            # Checks |ax - b| and the residual info from the result
            # The first n rows is the least square solution.
            # Rows n to m-1 contain residual information.
            x = gels_result[:n]
            resid_info = gels_result[n:]
            resid_norm = (torch.mm(a, x) - b).norm()
            self.assertEqual(resid_norm, expected_norm, atol=1e-8, rtol=0)
            self.assertEqual(resid_info.norm(), resid_norm, atol=1e-8, rtol=0)
        a_copy = a.clone()
        b_copy = b.clone()
        res1 = torch.lstsq(b, a)[0]
        self.assertEqual(a, a_copy, atol=0, rtol=0)
        self.assertEqual(b, b_copy, atol=0, rtol=0)
        check_norm(a, b, expectedNorm, res1)
        ta = torch.tensor((), dtype=dtype, device=device)
        tb = torch.tensor((), dtype=dtype, device=device)
        res2 = torch.lstsq(b, a, out=(tb, ta))[0]
        self.assertEqual(a, a_copy, atol=0, rtol=0)
        self.assertEqual(b, b_copy, atol=0, rtol=0)
        check_norm(a, b, expectedNorm, res2)
        res3 = torch.lstsq(b, a, out=(b, a))[0]
        check_norm(a_copy, b_copy, expectedNorm, res3)
        self.assertEqual(res1, tb, atol=0, rtol=0)
        self.assertEqual(res1, b, atol=0, rtol=0)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        self.assertEqual(res1, res3, atol=0, rtol=0)
    # basic test
    expectedNorm = 0
    a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
                      (-7.84, -0.28, 3.24, 8.09),
                      (-4.39, -3.24, 6.27, 5.28),
                      (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
    b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
                      (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
    _test_underdetermined(a, b, expectedNorm)
    # test overdetermined
    expectedNorm = 17.390200628863
    a = torch.tensor(((1.44, -9.96, -7.55, 8.34, 7.08, -5.45),
                      (-7.84, -0.28, 3.24, 8.09, 2.52, -5.70),
                      (-4.39, -3.24, 6.27, 5.28, 0.74, -1.19),
                      (4.53, 3.83, -6.64, 2.06, -2.47, 4.70)), dtype=dtype, device=device).t()
    b = torch.tensor(((8.58, 8.26, 8.48, -5.28, 5.72, 8.93),
                      (9.35, -4.43, -0.70, -0.26, -7.36, -2.52)), dtype=dtype, device=device).t()
    _test_overdetermined(a, b, expectedNorm)
    # test underdetermined
    expectedNorm = 0
    a = torch.tensor(((1.44, -9.96, -7.55),
                      (-7.84, -0.28, 3.24),
                      (-4.39, -3.24, 6.27),
                      (4.53, 3.83, -6.64)), dtype=dtype, device=device).t()
    b = torch.tensor(((8.58, 8.26, 8.48),
                      (9.35, -4.43, -0.70)), dtype=dtype, device=device).t()
    _test_underdetermined(a, b, expectedNorm)
    # test reuse
    expectedNorm = 0
    a = torch.tensor(((1.44, -9.96, -7.55, 8.34),
                      (-7.84, -0.28, 3.24, 8.09),
                      (-4.39, -3.24, 6.27, 5.28),
                      (4.53, 3.83, -6.64, 2.06)), dtype=dtype, device=device).t()
    b = torch.tensor(((8.58, 8.26, 8.48, -5.28),
                      (9.35, -4.43, -0.70, -0.26)), dtype=dtype, device=device).t()
    ta = torch.tensor((), dtype=dtype, device=device)
    tb = torch.tensor((), dtype=dtype, device=device)
    for _ in range(3):
        # repeated calls must be able to reuse the same out tensors
        torch.lstsq(b, a, out=(tb, ta))
        self.assertEqual((torch.mm(a, tb) - b).norm(), expectedNorm, atol=1e-8, rtol=0)
    @skipCUDAIfNoMagma
    @skipCPUIfNoLapack
    def test_lapack_empty(self, device):
        """Check that a selection of LAPACK-backed ops accept zero-sized dimensions."""
        # FIXME: these are just a selection of LAPACK functions -- we need a general strategy here.
        # The LAPACK functions themselves generally do NOT work with zero sized dimensions, although
        # numpy/sci often has a direct wrapper (e.g. lu_factor) and a wrapper that "does the right thing"
        # (e.g. lu). We often name our functions identically to the lapack function, so it will take work
        # to name / migrate-to better wrappers.
        def fn(torchfn, *args):
            # Tuple arguments become random tensors of that shape on `device`;
            # any other argument (e.g. the booleans below) is forwarded as-is.
            return torchfn(*tuple(torch.randn(shape, device=device) if isinstance(shape, tuple) else shape
                                  for shape in args))

        # inverse, pinverse
        self.assertEqual((0, 0), fn(torch.inverse, (0, 0)).shape)
        self.assertEqual((5, 0), fn(torch.pinverse, (0, 5)).shape)
        self.assertEqual((0, 5), fn(torch.pinverse, (5, 0)).shape)
        self.assertEqual((0, 0), fn(torch.pinverse, (0, 0)).shape)

        # det, logdet, slogdet: the empty determinant is 1 (so logdet is 0)
        self.assertEqual(torch.tensor(1., device=device), fn(torch.det, (0, 0)))
        self.assertEqual(torch.tensor(0., device=device), fn(torch.logdet, (0, 0)))
        self.assertEqual((torch.tensor(1., device=device), torch.tensor(0., device=device)),
                         fn(torch.slogdet, (0, 0)))

        # eig, symeig -- the second positional arg requests eigenvectors
        evalues, evectors = fn(torch.eig, (0, 0), True)
        self.assertEqual([(0, 2), (0, 0)], [evalues.shape, evectors.shape])
        evalues, evectors = fn(torch.symeig, (0, 0), True)
        self.assertEqual([(0,), (0, 0)], [evalues.shape, evectors.shape])

        # qr -- the boolean is the `some` flag choosing reduced vs. full Q
        q, r = fn(torch.qr, (3, 0), True)
        self.assertEqual([(3, 0), (0, 0)], [q.shape, r.shape])
        q, r = fn(torch.qr, (0, 3), True)
        self.assertEqual([(0, 0), (0, 3)], [q.shape, r.shape])
        q, r = fn(torch.qr, (3, 0), False)
        self.assertEqual([(3, 3), (3, 0)], [q.shape, r.shape])

        # lstsq: empty inputs are rejected rather than handled
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0, 0), torch.randn(0, 0)))
        self.assertRaises(RuntimeError, lambda: torch.lstsq(torch.randn(0,), torch.randn(0, 0)))
    @tf32_on_and_off(0.005)
    def test_tensordot(self, device):
        """Compare torch.tensordot against numpy.tensordot for several dim specs."""
        # Explicit per-operand contraction axes.
        a = torch.arange(60., device=device).reshape(3, 4, 5)
        b = torch.arange(24., device=device).reshape(4, 3, 2)
        c = torch.tensordot(a, b, dims=([1, 0], [0, 1])).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=([1, 0], [0, 1])))
        self.assertEqual(c, cn)
        # Same contraction through the `out=` kwarg.
        cout = torch.zeros((5, 2), device=device)
        torch.tensordot(a, b, dims=([1, 0], [0, 1]), out=cout).cpu()
        self.assertEqual(c, cout)
        # Integer `dims`: contract the last 2 dims of a with the first 2 of b.
        a = torch.randn(2, 3, 4, 5, device=device)
        b = torch.randn(4, 5, 6, 7, device=device)
        c = torch.tensordot(a, b, dims=2).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy(),
                                           axes=2))
        # Negative dims must raise.
        with self.assertRaisesRegex(RuntimeError, "expects dims >= 0"):
            torch.tensordot(a, b, dims=-1)
        self.assertEqual(c, cn)
        # Default dims (= 2) path.
        c = torch.tensordot(a, b).cpu()
        cn = torch.from_numpy(np.tensordot(a.cpu().numpy(), b.cpu().numpy()))
        self.assertEqual(c, cn)
        # Degenerate case: 0-dim tensors with dims=0 (outer product of scalars).
        a = torch.tensordot(torch.tensor(0.), torch.tensor(0.), 0)
        an = torch.from_numpy(np.tensordot(np.zeros((), dtype=np.float32), np.zeros((), dtype=np.float32), 0))
        self.assertEqual(a, an)
    @onlyCUDA
    @skipCUDAIfNoMagma
    @skipCUDAIfNoCusolver
    @setLinalgBackendsToDefaultFinally
    def test_preferred_linalg_library(self):
        """Smoke-test switching the CUDA linalg backend between cusolver/magma/default."""
        # The main purpose of this test is to make sure these "backend" calls work normally without raising exceptions.
        x = torch.randint(2, 5, (2, 4, 4), device='cuda', dtype=torch.double)
        torch.backends.cuda.preferred_linalg_library('cusolver')
        out1 = torch.linalg.inv(x)
        torch.backends.cuda.preferred_linalg_library('magma')
        out2 = torch.linalg.inv(x)
        torch.backends.cuda.preferred_linalg_library('default')
        # Although linalg preferred flags doesn't affect CPU currently,
        # we set this to make sure the flag can switch back to default normally.
        out_ref = torch.linalg.inv(x.cpu())
        # Both CUDA backends must agree with the CPU reference and each other.
        self.assertEqual(out_ref, out1.cpu())
        self.assertEqual(out1, out2)
# Generate per-device (CPU/CUDA/...) variants of every TestLinalg test.
instantiate_device_type_tests(TestLinalg, globals())

if __name__ == '__main__':
    run_tests()
| 47.373297 | 131 | 0.579734 |
66da6cd601c4d595ae01cc0548907f734fc9b68e
| 957 |
py
|
Python
|
cli/commands/command.py
|
collabinator/clivrt
|
1a814bf41becf43b17c60e644e353f4196dceba8
|
[
"Apache-2.0"
] | 1 |
2022-02-11T16:56:38.000Z
|
2022-02-11T16:56:38.000Z
|
cli/commands/command.py
|
collabinator/clivrt
|
1a814bf41becf43b17c60e644e353f4196dceba8
|
[
"Apache-2.0"
] | 10 |
2022-02-02T05:23:25.000Z
|
2022-02-18T20:58:42.000Z
|
cli/commands/command.py
|
collabinator/clivrt
|
1a814bf41becf43b17c60e644e353f4196dceba8
|
[
"Apache-2.0"
] | null | null | null |
from configparser import ConfigParser
from distutils.command.config import config
import logging
from attrs import define
from cli.datamodel.session import Session
from cli.network.networkmanager import NetworkManager
@define
class Command:
    """Base class for CLI commands.

    On construction a command registers itself into the shared
    ``commands_list`` mapping under its ``cmd_name``.
    """
    cmd_name: str
    config: ConfigParser
    session: Session
    network_mgr: NetworkManager
    # base class init
    def __init__(self, commands_list, config, session, wsclient):
        self.config = config
        self.session = session
        self.network_mgr = wsclient
        # NOTE(review): ``cmd_name`` is never assigned here — presumably each
        # subclass defines it as a class attribute before calling this; confirm.
        commands_list[self.cmd_name] = self
        logging.debug("Command available " + self.__class__.__name__)
    async def do_command(self, *args):
        # Default no-op implementation; subclasses override with real behaviour.
        print('noop command')
    def show_help(self):
        # Print the subclass-provided ``help_text`` attribute when present.
        help_text = getattr(self, "help_text", None)
        if help_text:
            print(help_text)
        else:
            print("Sorry, I can't help you with that")
| 29.90625 | 74 | 0.650993 |
7d712107fe66dfb7d1dba32d7c5c488821cc7928
| 1,164 |
py
|
Python
|
webStorm-APICloud/python_tools/Lib/test/test_netrc.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | 81 |
2017-03-13T08:24:01.000Z
|
2021-04-02T09:48:38.000Z
|
webStorm-APICloud/python_tools/Lib/test/test_netrc.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | 6 |
2017-04-30T08:36:55.000Z
|
2017-09-22T01:37:28.000Z
|
webStorm-APICloud/python_tools/Lib/test/test_netrc.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | 41 |
2017-03-18T14:11:58.000Z
|
2021-04-14T05:06:09.000Z
|
import netrc, os, unittest, sys
from test import test_support
TEST_NETRC = """
machine foo login log1 password pass1 account acct1
macdef macro1
line1
line2
macdef macro2
line3
line4
default login log2 password pass2
"""
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
    """Exercise :mod:`netrc` parsing of machine entries, macros and the default entry."""

    def setUp(self):
        # Write the fixture file; outside cygwin request explicit text mode so
        # line endings are translated consistently.
        mode = 'w'
        if sys.platform not in ['cygwin']:
            mode += 't'
        fp = open(temp_filename, mode)
        fp.write(TEST_NETRC)
        fp.close()
        self.netrc = netrc.netrc(temp_filename)

    def tearDown(self):
        # Drop the parsed object and remove the fixture file.
        del self.netrc
        os.unlink(temp_filename)

    def test_case_1(self):
        # assertEqual replaces the deprecated assert_ alias and reports the
        # differing values on failure instead of a bare boolean.
        self.assertEqual(self.netrc.macros, {'macro1': ['line1\n', 'line2\n'],
                                             'macro2': ['line3\n', 'line4\n']})
        self.assertEqual(self.netrc.hosts['foo'], ('log1', 'acct1', 'pass1'))
        self.assertEqual(self.netrc.hosts['default'], ('log2', None, 'pass2'))
def test_main():
    # Run the suite through the stdlib regression-test driver.
    test_support.run_unittest(NetrcTestCase)

if __name__ == "__main__":
    test_main()
| 23.755102 | 77 | 0.565292 |
6394921455bdd45656c0d2323c7144b2c9aea58f
| 409 |
py
|
Python
|
main_crm/asgi.py
|
itsmohiful/customer-relationship-management
|
33363af8a272e5fda85d72709f3cf5d4469f555d
|
[
"MIT"
] | null | null | null |
main_crm/asgi.py
|
itsmohiful/customer-relationship-management
|
33363af8a272e5fda85d72709f3cf5d4469f555d
|
[
"MIT"
] | null | null | null |
main_crm/asgi.py
|
itsmohiful/customer-relationship-management
|
33363af8a272e5fda85d72709f3cf5d4469f555d
|
[
"MIT"
] | null | null | null |
"""
ASGI config for main_crm project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'main_crm.settings')
application = get_asgi_application()
| 24.058824 | 79 | 0.755501 |
fe29b8e5e7c80e5b0b1c6370135da47e66040e88
| 687 |
py
|
Python
|
var/spack/repos/builtin/packages/py-contextvars/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-contextvars/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 |
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/py-contextvars/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 |
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyContextvars(PythonPackage):
    """This package implements a backport of Python 3.7 contextvars module
    (see PEP 567) for Python 3.6."""

    homepage = "https://github.com/MagicStack/contextvars"
    pypi = "contextvars/contextvars-2.4.tar.gz"

    version('2.4', sha256='f38c908aaa59c14335eeea12abea5f443646216c4e29380d7bf34d2018e2c39e')

    depends_on('py-setuptools', type='build')
    # NOTE(review): the spec below looks mangled in this copy (likely
    # ``py-immutables@...``) — verify against the upstream Spack recipe.
    depends_on('[email protected]:', type=('build', 'run'))
| 32.714286 | 93 | 0.736536 |
d8515968dcf0d9a19c01d8cfe2ff889a5f1f0fd4
| 152 |
py
|
Python
|
vendimia/ventas/apps.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | null | null | null |
vendimia/ventas/apps.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | 5 |
2020-02-11T23:26:24.000Z
|
2022-01-13T00:39:54.000Z
|
vendimia/ventas/apps.py
|
soru13/vendimia
|
ebcf85473855e6f990b1a49574ac669fdd4d443e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class VentasConfig(AppConfig):
    """Django application configuration for the ``ventas`` (sales) app."""
    name = 'ventas'
| 16.888889 | 39 | 0.730263 |
d6b7cde0714cca573be9389088c8754d19ed5602
| 1,122 |
py
|
Python
|
fundamentals/download/tests/test__fetch.py
|
mtbannister/fundamentals
|
f1ba77ded115c727432220e50b2e20d251b2e225
|
[
"MIT"
] | null | null | null |
fundamentals/download/tests/test__fetch.py
|
mtbannister/fundamentals
|
f1ba77ded115c727432220e50b2e20d251b2e225
|
[
"MIT"
] | null | null | null |
fundamentals/download/tests/test__fetch.py
|
mtbannister/fundamentals
|
f1ba77ded115c727432220e50b2e20d251b2e225
|
[
"MIT"
] | null | null | null |
import os
import nose2
import nose2
import unittest
import shutil
import yaml
from fundamentals.download import _fetch
from fundamentals.utKit import utKit
from fundamentals import tools
# Bootstrap the fundamentals tooling (logger, settings, db connection).
su = tools(
    arguments={"settingsFile": None},
    docString=__doc__,
    logLevel="DEBUG",
    options_first=False,
    projectName="fundamentals"
)
arguments, settings, log, dbConn = su.setup()

# load settings -- the Python 2 ``file`` builtin no longer exists in Python 3;
# use ``open`` in a context manager so the handle is always closed, and
# ``safe_load`` so arbitrary YAML tags cannot execute code.
with open("/Users/Dave/.config/fundamentals/fundamentals.yaml", 'r') as stream:
    settings = yaml.safe_load(stream)

# SETUP AND TEARDOWN FIXTURE FUNCTIONS FOR THE ENTIRE MODULE
moduleDirectory = os.path.dirname(__file__)
utKit = utKit(moduleDirectory)
log, dbConn, pathToInputDir, pathToOutputDir = utKit.setupModule()
utKit.tearDownModule()
# class test__fetch(unittest.TestCase):
#     def test__fetch_function(self):
#         kwargs = {}
#         kwargs["log"] = log
#         kwargs["settings"] = settings
#         # xt-kwarg_key_and_value
#         testObject = _fetch(**kwargs)
#         testObject.get()
# x-print-testpage-for-pessto-marshall-web-object
# x-class-to-test-named-worker-function
| 23.375 | 66 | 0.717469 |
f38f8ecaf1cf48deb5cd0adafa4a3d1028e5ebdd
| 2,173 |
py
|
Python
|
alipay/aop/api/domain/AlipayOpenMessagetestCesSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213 |
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AlipayOpenMessagetestCesSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29 |
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AlipayOpenMessagetestCesSendModel.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59 |
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.GavintestNewLeveaOne import GavintestNewLeveaOne
class AlipayOpenMessagetestCesSendModel(object):
    """Generated Alipay request model with two list-valued fields, ``cop`` and ``str``."""

    def __init__(self):
        # Backing storage for the two properties; populated through the setters.
        self._cop = None
        self._str = None

    @property
    def cop(self):
        return self._cop

    @cop.setter
    def cop(self, value):
        # Accept a list mixing GavintestNewLeveaOne instances and plain dicts;
        # dicts are converted. Non-list values are silently ignored, matching
        # the generated-SDK convention.
        if isinstance(value, list):
            self._cop = list()
            for item in value:
                if isinstance(item, GavintestNewLeveaOne):
                    self._cop.append(item)
                else:
                    self._cop.append(GavintestNewLeveaOne.from_alipay_dict(item))

    @property
    def str(self):
        return self._str

    @str.setter
    def str(self, value):
        # Only list values are accepted; a shallow copy is stored.
        if isinstance(value, list):
            self._str = list(value)

    def to_alipay_dict(self):
        """Serialise the model to a plain dict, converting nested objects in place."""
        params = dict()
        if self.cop:
            if isinstance(self.cop, list):
                for idx, element in enumerate(self.cop):
                    if hasattr(element, 'to_alipay_dict'):
                        self.cop[idx] = element.to_alipay_dict()
            if hasattr(self.cop, 'to_alipay_dict'):
                params['cop'] = self.cop.to_alipay_dict()
            else:
                params['cop'] = self.cop
        if self.str:
            if isinstance(self.str, list):
                for idx, element in enumerate(self.str):
                    if hasattr(element, 'to_alipay_dict'):
                        self.str[idx] = element.to_alipay_dict()
            if hasattr(self.str, 'to_alipay_dict'):
                params['str'] = self.str.to_alipay_dict()
            else:
                params['str'] = self.str
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for empty/falsy input."""
        if not d:
            return None
        o = AlipayOpenMessagetestCesSendModel()
        for key in ('cop', 'str'):
            if key in d:
                setattr(o, key, d[key])
        return o
| 28.592105 | 78 | 0.519098 |
07a91e44c6e98c84ceddaba90e12c8ffdd30a323
| 2,022 |
py
|
Python
|
rastervision/data/label_source/object_detection_geojson_source_config.py
|
AirbusAerial/raster-vision
|
cfa7826169392e497fb57a540eb952fc6cee3a98
|
[
"Apache-2.0"
] | 2 |
2019-04-17T13:04:23.000Z
|
2020-10-04T10:28:27.000Z
|
rastervision/data/label_source/object_detection_geojson_source_config.py
|
AirbusAerial/raster-vision
|
cfa7826169392e497fb57a540eb952fc6cee3a98
|
[
"Apache-2.0"
] | null | null | null |
rastervision/data/label_source/object_detection_geojson_source_config.py
|
AirbusAerial/raster-vision
|
cfa7826169392e497fb57a540eb952fc6cee3a98
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
import rastervision as rv
from rastervision.data.label_source import (
LabelSourceConfig, LabelSourceConfigBuilder, ObjectDetectionGeoJSONSource)
from rastervision.protos.label_source_pb2 import LabelSourceConfig as LabelSourceConfigMsg
class ObjectDetectionGeoJSONSourceConfig(LabelSourceConfig):
    """Config for a label source reading object-detection labels from GeoJSON.

    Attributes:
        uri: URI of the GeoJSON file holding the labels.
    """

    def __init__(self, uri):
        super().__init__(source_type=rv.OBJECT_DETECTION_GEOJSON)
        self.uri = uri

    def to_proto(self):
        """Serialize this config into its protobuf message."""
        msg = super().to_proto()
        opts = LabelSourceConfigMsg.ObjectDetectionGeoJSONSource(uri=self.uri)
        msg.object_detection_geojson_source.CopyFrom(opts)
        return msg

    def create_source(self, task_config, extent, crs_transformer, tmp_dir):
        """Build the runtime label source described by this config."""
        return ObjectDetectionGeoJSONSource(self.uri, crs_transformer,
                                            task_config.class_map, extent)

    def update_for_command(self,
                           command_type,
                           experiment_config,
                           context=None,
                           io_def=None):
        """Register this source's URI as an input of the given command."""
        io_def = io_def or rv.core.CommandIODefinition()
        io_def.add_input(self.uri)
        return io_def
class ObjectDetectionGeoJSONSourceConfigBuilder(LabelSourceConfigBuilder):
    """Fluent builder for :class:`ObjectDetectionGeoJSONSourceConfig`."""

    def __init__(self, prev=None):
        # Seed the builder config from a previous config object, if given.
        config = {'uri': prev.uri} if prev else {}
        super().__init__(ObjectDetectionGeoJSONSourceConfig, config)

    def validate(self):
        """Raise a ConfigError unless a URI has been set."""
        if self.config.get('uri') is None:
            raise rv.ConfigError(
                'You must set the uri for ObjectDetectionGeoJSONSourceConfig'
                ' Use "with_uri".')

    def from_proto(self, msg):
        """Populate a fresh builder from a protobuf message."""
        builder = ObjectDetectionGeoJSONSourceConfigBuilder()
        return builder.with_uri(msg.object_detection_geojson_source.uri)

    def with_uri(self, uri):
        """Set URI for a GeoJSON used to read/write predictions."""
        new_builder = deepcopy(self)
        new_builder.config['uri'] = uri
        return new_builder
| 33.7 | 90 | 0.646884 |
0d912eb78233a40490ae11f3d34b738a50c05e0d
| 8,400 |
py
|
Python
|
white_box_attack.py
|
zeta1999/adversarial-robustness-by-design
|
3c252a17685bea0ffa94a6e1bcc694915470da0e
|
[
"MIT"
] | 8 |
2021-01-08T10:56:47.000Z
|
2021-06-08T07:58:14.000Z
|
white_box_attack.py
|
zeta1999/adversarial-robustness-by-design
|
3c252a17685bea0ffa94a6e1bcc694915470da0e
|
[
"MIT"
] | null | null | null |
white_box_attack.py
|
zeta1999/adversarial-robustness-by-design
|
3c252a17685bea0ffa94a6e1bcc694915470da0e
|
[
"MIT"
] | 1 |
2021-05-04T09:41:49.000Z
|
2021-05-04T09:41:49.000Z
|
import argparse
import numpy as np
import torch
from utils import get_model, compute_score, cifar10, cifar100, return_path_to_folder
class AdversarialAttack:
    def __init__(self, model, dataset, n_epochs, step_size, opu_output, opu_input, pgd_attacks_iterations, binary_layer,
                 save_images, sign_back, opu):
        """ Class that performs the adversarial attack (both FGSM and PGD), if save_images is True it saves the images
        for further experiments; i.e. transfer attack"""
        self.model_name = model
        self.dataset = dataset
        self.n_epochs = n_epochs
        self.step_size = step_size
        self.opu_output = opu_output
        self.opu_input = opu_input
        self.pgd_attacks_iterations = pgd_attacks_iterations
        self.binary_layer = binary_layer
        self.save_images = save_images
        self.sign_back = sign_back
        self.opu = opu
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # NOTE(review): this reads the module-level ``args`` (set in __main__),
        # not ``self.dataset`` — works only when run as a script; confirm intent.
        self.path_to_folder = return_path_to_folder(args.dataset)
        self.model, self.file_name = self._upload_model()

    def attack(self, dataloader, epsilon):
        """Run FGSM (single PGD step) and PGD over the loader; return both accuracies."""
        correct_fgsm, correct_PGD, n = 0, 0, 0
        if self.save_images:
            # Pre-allocate buffers for the whole (assumed 10k-sample) test set.
            # assumes 3x32x32 inputs (CIFAR) — TODO confirm for other datasets.
            saved_FGSM, saved_PGD = torch.zeros(10000, 3, 32, 32), torch.zeros(10000, 3, 32, 32)
            saved_labels = torch.zeros(10000)
            dataset = self.path_to_folder.split('/')[1]
            images_path = 'adversarial_data/' + dataset + '/'
            pathlib.Path(images_path).mkdir(parents=True, exist_ok=True)
        sanity_check = True
        for i, (x, target) in enumerate(dataloader):
            x, target = x.to(self.device), target.to(self.device)
            if sanity_check:
                print(f'- Check origianl clamp, x max = {x.max()}, x min = {x.min()}')
            n += x.shape[0]
            x.requires_grad = True
            # FGSM is implemented as PGD with a single step (num_steps=1).
            perturbed_x_fgsm = self._projected_gradient_descent(x=x, y=target, eps=epsilon, sanity_check=sanity_check,
                                                                num_steps=1, clamp=(x.min().item(), x.max().item()),
                                                                step_size=self.step_size)
            perturbed_x_PGD = self._projected_gradient_descent(x=x, y=target, eps=epsilon, sanity_check=sanity_check,
                                                               num_steps=self.pgd_attacks_iterations,
                                                               clamp=(x.min().item(), x.max().item()),
                                                               step_size=self.step_size)
            if self.save_images:
                # Slot this batch's adversarial images into the save buffers.
                saved_FGSM[i * dataloader.batch_size: (i + 1) * dataloader.batch_size] = perturbed_x_fgsm.to('cpu')
                saved_PGD[i * dataloader.batch_size: (i + 1) * dataloader.batch_size] = perturbed_x_PGD.to('cpu')
                saved_labels[i * dataloader.batch_size: (i + 1) * dataloader.batch_size] = target.to('cpu')
            # Accuracy under attack: count predictions still matching the label.
            y_fgsm, y_PGD = self.model(perturbed_x_fgsm), self.model(perturbed_x_PGD)
            y_fgsm, y_PGD = torch.argmax(y_fgsm, dim=1), torch.argmax(y_PGD, dim=1)
            correct_fgsm += (y_fgsm == target).float().sum()
            correct_PGD += (y_PGD == target).float().sum()
            sanity_check = False
        final_acc_fgsm = correct_fgsm / n
        final_acc_PGD = correct_PGD / n
        if self.save_images:
            torch.save(saved_FGSM, images_path + f'fgsm_{epsilon}')
            torch.save(saved_PGD, images_path + f'pgd_{epsilon}')
            torch.save(saved_labels, images_path + f'labels_{epsilon}')
        return final_acc_fgsm.item(), final_acc_PGD.item()

    def _projected_gradient_descent(self, x, y, eps, sanity_check, num_steps, step_size, clamp=(-1, 1)):
        """Performs the projected gradient descent attack on a batch of images."""
        x_adv = x.clone().detach().requires_grad_(True).to(x.device)
        for i in range(num_steps):
            # Fresh leaf tensor each step so gradients are computed w.r.t. it.
            _x_adv = x_adv.clone().detach().requires_grad_(True)
            prediction = self.model(_x_adv)
            self.model.zero_grad()
            loss = torch.nn.functional.cross_entropy(prediction, y)
            loss.backward()
            with torch.no_grad():
                if num_steps == 1:
                    # FGSM: one full-epsilon step along the gradient sign.
                    x_adv += eps * _x_adv.grad.data.sign()
                    if eps == 0 and sanity_check:
                        print(f'-Sanity check after grad adding: {torch.equal(x_adv, x)}')
                else:
                    # PGD: small step, then project back into the eps-ball.
                    x_adv += _x_adv.grad.data.sign() * step_size
                    x_adv = torch.max(torch.min(x_adv, x + eps), x - eps)
            # Keep the adversarial example inside the valid data range.
            x_adv = x_adv.clamp(*clamp)
            if eps == 0 and sanity_check:
                print(f'- Sanity check after clamping: {torch.equal(x_adv, x)}')
            sanity_check = False
        return x_adv.detach()

    def _upload_model(self):
        """Uploads model to perform the attack on"""
        model, file_name = get_model(model=self.model_name, binary_layer=self.binary_layer, opu_output=self.opu_output,
                                     opu_input=self.opu_input, sign_back=self.sign_back, n_epochs=self.n_epochs,
                                     device=self.device, dataset=self.dataset, opu=self.opu)
        # Load the trained weights and switch to eval mode before attacking.
        model.load_state_dict(torch.load(self.path_to_folder + 'models/' + file_name + '.pt'))
        print(file_name)
        model.eval()
        return model.to(self.device), file_name
def pars_args():
    """Parse the command-line options for the white-box attack script."""
    arg_parser = argparse.ArgumentParser('parameters')

    # Model / data selection.
    arg_parser.add_argument("-model", default='VGG16-R-OPU', type=str, help='Name of the model',
                            choices=['VGG-16', 'VGG16-OPU', 'VGG16-R-OPU', 'Ablation'])
    arg_parser.add_argument("-dataset", default='cifar10', type=str, help='dataset', choices=['cifar10', 'cifar100'])

    # Training / attack hyper-parameters.
    arg_parser.add_argument("-n_epochs", default=120, type=int, help='Number of epochs the model has been trained')
    arg_parser.add_argument("-step_size", default=0.01, type=float,
                            help='Learning rate with which the model has been trained')
    arg_parser.add_argument("-opu_output", default=512, type=int, help='Dimension of OPU output')
    arg_parser.add_argument("-opu_input", default=512, type=int, help='Dimension of OPU output')
    arg_parser.add_argument("-pgd_attacks_iterations", default=50, type=int, help='Number of iterations for PGD attack.')

    # boolean switches.
    arg_parser.add_argument("-binary_layer", default=False, action='store_true', help='To activate binary layer')
    arg_parser.add_argument("-save_images", default=False, action='store_true', help='Saves images for TA')
    arg_parser.add_argument("-sign_back", default=False, action='store_true', help='Replace sign by identity for BDSM')
    arg_parser.add_argument("-opu", default=False, action='store_true', help='Needed for ablation models')

    return arg_parser.parse_args()
if __name__ == '__main__':
    import pathlib

    args = pars_args()
    adversarial_attack = AdversarialAttack(args.model, args.dataset, args.n_epochs, args.step_size, args.opu_output,
                                           args.opu_input, args.pgd_attacks_iterations, args.binary_layer,
                                           args.save_images,
                                           args.sign_back, args.opu)
    # Build only the test loader; attacks are evaluated on held-out data.
    if args.dataset == 'cifar10':
        _, test_dl = cifar10(batch_size=100, num_workers=8)  # if it gives trouble remove num_workers
    else:
        _, test_dl = cifar100(batch_size=100, num_workers=8)  # same as above
    # Clean accuracy baseline: should match the eps=0 attack result below.
    print(
        f'Test score={compute_score(adversarial_attack.model, test_dl, adversarial_attack.device)}, to be compared to eps=0 attack')
    epsilons = [0, .01, .02, .03, .04, .05]
    if args.save_images:
        epsilons = epsilons + [0.06, 0.07, 0.08, 0.09, 0.1]  # images are saved for transfer attack experiments.
    accuracies = list()
    for eps in epsilons:
        # Each entry is a (fgsm_accuracy, pgd_accuracy) pair for this epsilon.
        acc_test = adversarial_attack.attack(test_dl, eps)
        accuracies.append(acc_test)
        print(eps, accuracies[-1])
    file_name = 'AA_' + adversarial_attack.file_name
    path = adversarial_attack.path_to_folder + 'attacks/'
    if args.sign_back:
        file_name = file_name + '_sign_back'
    # Persist the accuracy-vs-epsilon curve for later plotting/analysis.
    pathlib.Path(path).mkdir(parents=True, exist_ok=True)
    np.savez(path + file_name, accuracies=accuracies, eps=epsilons)
| 50 | 132 | 0.612381 |
c06695b572ea594fe782c5f5eba5162ae256bd11
| 94 |
py
|
Python
|
17 - Modules/Ex_111/exec111.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | 4 |
2021-04-23T18:07:58.000Z
|
2021-05-12T11:38:14.000Z
|
17 - Modules/Ex_111/exec111.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | null | null | null |
17 - Modules/Ex_111/exec111.py
|
o-Ian/Practice-Python
|
1e4b2d0788e70006096a53a7cf038db3148ba4b7
|
[
"MIT"
] | null | null | null |
# Read a price from the user and print a formatted summary via the
# project's currency-utilities module.
from utilidadesCeV.moeda import resumo
p = float(input('Digite o preço: '))  # prompt (Portuguese): "Enter the price: "
# presumably resumo(price, increase_pct, decrease_pct) — verify in utilidadesCeV.moeda
resumo(p, 80, 35)
| 23.5 | 38 | 0.723404 |
b503edd34a703f9584446125494b0cb75b8bd617
| 4,308 |
py
|
Python
|
src/sage/combinat/root_system/type_G_affine.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 1,742 |
2015-01-04T07:06:13.000Z
|
2022-03-30T11:32:52.000Z
|
src/sage/combinat/root_system/type_G_affine.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 66 |
2015-03-19T19:17:24.000Z
|
2022-03-16T11:59:30.000Z
|
src/sage/combinat/root_system/type_G_affine.py
|
UCD4IDS/sage
|
43474c96d533fd396fe29fe0782d44dc7f5164f7
|
[
"BSL-1.0"
] | 495 |
2015-01-10T10:23:18.000Z
|
2022-03-24T22:06:11.000Z
|
"""
Root system data for (untwisted) type G affine
"""
#*****************************************************************************
# Copyright (C) 2008-2009 Daniel Bump
# Copyright (C) 2008-2009 Justin Walker
# Copyright (C) 2008-2009 Nicolas M. Thiery <nthiery at users.sf.net>,
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from .cartan_type import CartanType_standard_untwisted_affine
class CartanType(CartanType_standard_untwisted_affine):
    def __init__(self):
        """
        EXAMPLES::

            sage: ct = CartanType(['G',2,1])
            sage: ct
            ['G', 2, 1]
            sage: ct._repr_(compact = True)
            'G2~'
            sage: ct.is_irreducible()
            True
            sage: ct.is_finite()
            False
            sage: ct.is_affine()
            True
            sage: ct.is_untwisted_affine()
            True
            sage: ct.is_crystallographic()
            True
            sage: ct.is_simply_laced()
            False
            sage: ct.classical()
            ['G', 2]
            sage: ct.dual()
            ['G', 2, 1]^*
            sage: ct.dual().is_untwisted_affine()
            False

        TESTS::

            sage: TestSuite(ct).run()
        """
        CartanType_standard_untwisted_affine.__init__(self, "G",2)

    def dynkin_diagram(self):
        """
        Returns the extended Dynkin diagram for type G.

        EXAMPLES::

            sage: g = CartanType(['G',2,1]).dynkin_diagram()
            sage: g
              3
            O=<=O---O
            1   2   0
            G2~
            sage: sorted(g.edges())
            [(0, 2, 1), (1, 2, 1), (2, 0, 1), (2, 1, 3)]
        """
        from .dynkin_diagram import DynkinDiagram_class
        g = DynkinDiagram_class(self)
        g.add_edge(1, 2)
        g.set_edge_label(2,1,3)  # triple bond 2 => 1 of classical G2
        g.add_edge(0, 2)  # attach the affine node 0 to node 2
        return g

    def _latex_dynkin_diagram(self, label=lambda x: x, node=None, node_dist=2, dual=False):
        r"""
        Return a latex representation of the Dynkin diagram.

        EXAMPLES::

            sage: print(CartanType(['G',2,1])._latex_dynkin_diagram())
            \draw (2 cm,0) -- (4.0 cm,0);
            \draw (0, 0.15 cm) -- +(2 cm,0);
            \draw (0, -0.15 cm) -- +(2 cm,0);
            \draw (0,0) -- (2 cm,0);
            \draw (0, 0.15 cm) -- +(2 cm,0);
            \draw (0, -0.15 cm) -- +(2 cm,0);
            \draw[shift={(0.8, 0)}, rotate=180] (135 : 0.45cm) -- (0,0) -- (-135 : 0.45cm);
            \draw[fill=white] (0 cm, 0 cm) circle (.25cm) node[below=4pt]{$1$};
            \draw[fill=white] (2 cm, 0 cm) circle (.25cm) node[below=4pt]{$2$};
            \draw[fill=white] (4 cm, 0 cm) circle (.25cm) node[below=4pt]{$0$};
            <BLANKLINE>
        """
        if node is None:
            node = self._latex_draw_node
        # Single edge from node 2 to the affine node 0, then the extra
        # parallel strokes of the classical triple edge, then the classical
        # G2 diagram itself and finally the affine node.
        ret = "\\draw (%s cm,0) -- (%s cm,0);\n"%(node_dist, node_dist*2.0)
        ret += "\\draw (0, 0.15 cm) -- +(%s cm,0);\n"%node_dist
        ret += "\\draw (0, -0.15 cm) -- +(%s cm,0);\n"%node_dist
        ret += self.classical()._latex_dynkin_diagram(label, node, node_dist, dual)
        ret += node(2*node_dist, 0, label(0))
        return ret

    def ascii_art(self, label=lambda i: i, node=None):
        """
        Returns an ascii art representation of the Dynkin diagram

        EXAMPLES::

            sage: print(CartanType(['G',2,1]).ascii_art(label = lambda x: x+2))
              3
            O=<=O---O
            3   4   2
        """
        if node is None:
            node = self._ascii_art_node
        ret = "  3\n{}=<={}---{}".format(node(label(1)), node(label(2)), node(label(0)))
        return ret + "\n{!s:4}{!s:4}{!s:4}".format(label(1), label(2), label(0))

    def _default_folded_cartan_type(self):
        """
        Return the default folded Cartan type.

        EXAMPLES::

            sage: CartanType(['G', 2, 1])._default_folded_cartan_type()
            ['G', 2, 1] as a folding of ['D', 4, 1]
        """
        from sage.combinat.root_system.type_folded import CartanTypeFolded
        # G2~ arises from D4~ by folding its order-3 diagram automorphism:
        # orbits {0}, {1,3,4}, {2} map to nodes 0, 1, 2.
        return CartanTypeFolded(self, ['D', 4, 1], [[0], [1, 3, 4], [2]])
| 33.65625 | 91 | 0.481198 |
823e8bd09872e30562c3f0b59970c62ecd33a9da
| 1,995 |
py
|
Python
|
structural-probes/lpmayos_probes_experiments/analyze_results/add_mlm_perplexities.py
|
lpmayos/structural-probes
|
05a0914ac55b45149057c120635834d7134c6986
|
[
"Apache-2.0"
] | null | null | null |
structural-probes/lpmayos_probes_experiments/analyze_results/add_mlm_perplexities.py
|
lpmayos/structural-probes
|
05a0914ac55b45149057c120635834d7134c6986
|
[
"Apache-2.0"
] | null | null | null |
structural-probes/lpmayos_probes_experiments/analyze_results/add_mlm_perplexities.py
|
lpmayos/structural-probes
|
05a0914ac55b45149057c120635834d7134c6986
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os
from argparse import ArgumentParser
import json
import logging
import re
def add_perplexities(models_path, output_file, results_folder):
    """Annotate a results JSON with masked-LM perplexities read from disk.

    For every run ``i`` and checkpoint ``j`` listed in ``output_file``, reads
    ``<models_path>/<i>/<results_folder>/checkpoint-<j>/eval_mlm_results.txt``,
    extracts the first float from its first line and stores it under the
    checkpoint's ``mlm_perplexity`` key (``None`` when the file is missing).
    The updated JSON is written back to ``output_file`` with checkpoint keys
    converted to ints so they sort numerically.
    """
    # regular expression to match floats, with optional +/- ;
    # compiled once instead of once per checkpoint
    float_pattern = re.compile(r"[-+]?[0-9]*\.?[0-9]+")

    with open(output_file, 'r') as f:
        results = json.load(f)

    for i, run in results.items():
        for j, checkpoint in run.items():
            mlm_results_path = os.path.join(
                models_path, i, results_folder, 'checkpoint-' + j,
                'eval_mlm_results.txt')
            if os.path.exists(mlm_results_path):
                with open(mlm_results_path) as fh:
                    line = fh.readline()
                checkpoint['mlm_perplexity'] = float(float_pattern.findall(line)[0])
            else:
                logging.info('File %s does not exists yet' % mlm_results_path)
                checkpoint['mlm_perplexity'] = None

        # transform dictionary keys to integers for proper sorting when writing to file
        results[i] = {int(k): v for k, v in run.items()}

    with open(output_file, 'w') as f:
        json.dump(results, f, indent=4, sort_keys=True)
if __name__ == '__main__':
    """
    CLI entry point: read paths from the command line and annotate the
    results JSON with MLM perplexities.
    """
    logging.basicConfig(level=logging.DEBUG, format="%(message)s")
    parser = ArgumentParser()
    parser.add_argument("--models_path", default=None, type=str, required=True, help="/home/lpmayos/hd/code/transformers/lpmayos_experiments/bert_base_cased_finetuned_parsing")
    parser.add_argument("--output_file", default=None, type=str, required=True, help="bert_base_cased_finetuned_parsing_ptb_results.json")
    parser.add_argument("--results_folder", default=None, type=str, required=True, help="reuslts_parsing")
    args = parser.parse_args()
    add_perplexities(args.models_path, args.output_file, args.results_folder)
| 36.944444 | 176 | 0.653133 |
74bd608c0b832030efb97f066ec59c5302eec660
| 4,087 |
py
|
Python
|
platform/core/tests/test_ci/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_ci/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
platform/core/tests/test_ci/test_service.py
|
hackerwins/polyaxon
|
ff56a098283ca872abfbaae6ba8abba479ffa394
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import pytest
import ci
from db.models.build_jobs import BuildJob
from db.models.experiment_groups import ExperimentGroup
from db.models.experiments import Experiment
from db.models.jobs import Job
from factories.ci_factory import CIFactory
from factories.factory_projects import ProjectFactory
from factories.factory_repos import ExternalRepoFactory, RepoFactory
from libs.repos import git
from tests.base.case import BaseTest
@pytest.mark.ci_mark
class TestCIService(BaseTest):
    """Tests for the CI service: repo sync and polyaxonfile-driven triggering."""

    def setUp(self):
        super().setUp()
        self.project = ProjectFactory()
        # Set ci
        self.ci = CIFactory(project=self.project)

    def test_sync_get_latest_commit_internal_repo(self):
        """sync() should track the latest commit of an internal repo."""
        assert self.ci.code_reference is None
        # No repo
        assert ci.sync(self.project) is False
        assert self.ci.code_reference is None
        # Repo but no commits
        repo = RepoFactory(project=self.project)
        assert ci.sync(self.project) is False
        assert self.ci.code_reference is None
        # Put file and commit
        open('{}/foo'.format(repo.path), 'w')
        git.commit(repo.path, '[email protected]', 'username')
        assert ci.sync(self.project) is True
        assert self.ci.code_reference is not None
        last_code_ref = self.ci.code_reference
        # Resync without change does not create new code ref
        assert ci.sync(self.project) is False
        assert self.ci.code_reference == last_code_ref
        # Add new commit
        open('{}/foo2'.format(repo.path), 'w')
        git.commit(repo.path, '[email protected]', 'username')
        assert ci.sync(self.project) is True
        assert self.ci.code_reference is not None
        assert self.ci.code_reference != last_code_ref

    def test_sync_get_latest_commit_external_repo(self):
        """sync() should track upstream commits only for an external repo."""
        assert self.ci.code_reference is None
        # No repo
        assert ci.sync(self.project) is False
        assert self.ci.code_reference is None
        # Repo
        repo = ExternalRepoFactory(project=self.project,
                                   git_url='https://github.com/polyaxon/empty.git')
        assert ci.sync(self.project) is True
        assert self.ci.code_reference is not None
        last_code_ref = self.ci.code_reference
        # Resync without change does not create new code ref
        assert ci.sync(self.project) is False
        assert self.ci.code_reference == last_code_ref
        # Creating file manually does not work
        open('{}/foo'.format(repo.path), 'w')
        git.commit(repo.path, '[email protected]', 'username')
        assert ci.sync(self.project) is False
        assert self.ci.code_reference is not None
        assert self.ci.code_reference == last_code_ref

    def test_trigger(self):
        """trigger() should only start runs when a polyaxonfile is committed."""
        assert Experiment.objects.count() == 0
        assert ExperimentGroup.objects.count() == 0
        assert Job.objects.count() == 0
        assert BuildJob.objects.count() == 0
        # No repo
        assert ci.trigger(self.project) is False
        assert Experiment.objects.count() == 0
        assert ExperimentGroup.objects.count() == 0
        assert Job.objects.count() == 0
        assert BuildJob.objects.count() == 0
        # New code
        repo = RepoFactory(project=self.project)
        open('{}/foo'.format(repo.path), 'w')
        git.commit(repo.path, '[email protected]', 'username')
        assert ci.trigger(self.project) is False
        assert Experiment.objects.count() == 0
        assert ExperimentGroup.objects.count() == 0
        assert Job.objects.count() == 0
        assert BuildJob.objects.count() == 0
        # New file
        shutil.copy(os.path.abspath('tests/fixtures_static/polyaxonfile.yml'),
                    '{}/polyaxonfile.yml'.format(repo.path))
        git.commit(repo.path, '[email protected]', 'username')
        assert ci.trigger(self.project) is True
        assert Experiment.objects.count() == 1
        assert ExperimentGroup.objects.count() == 0
        assert Job.objects.count() == 0
        assert BuildJob.objects.count() == 0
| 35.232759 | 83 | 0.654514 |
33d0f1685f9b917a7cf35cfda323ed0cbf6a7422
| 6,181 |
py
|
Python
|
src/python/bcc/libbcc.py
|
sbilly/bcc
|
52cd371306705c39ad8c06fef195f6c4cdebf664
|
[
"ECL-2.0",
"Apache-2.0"
] | 1 |
2021-04-30T05:05:53.000Z
|
2021-04-30T05:05:53.000Z
|
src/python/bcc/libbcc.py
|
sbilly/bcc
|
52cd371306705c39ad8c06fef195f6c4cdebf664
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/python/bcc/libbcc.py
|
sbilly/bcc
|
52cd371306705c39ad8c06fef195f6c4cdebf664
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2015 PLUMgrid
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes as ct
lib = ct.CDLL("libbcc.so.0", use_errno=True)
# keep in sync with bpf_common.h
lib.bpf_module_create_b.restype = ct.c_void_p
lib.bpf_module_create_b.argtypes = [ct.c_char_p, ct.c_char_p, ct.c_uint]
lib.bpf_module_create_c.restype = ct.c_void_p
lib.bpf_module_create_c.argtypes = [ct.c_char_p, ct.c_uint,
ct.POINTER(ct.c_char_p), ct.c_int]
lib.bpf_module_create_c_from_string.restype = ct.c_void_p
lib.bpf_module_create_c_from_string.argtypes = [ct.c_char_p, ct.c_uint,
ct.POINTER(ct.c_char_p), ct.c_int]
lib.bpf_module_destroy.restype = None
lib.bpf_module_destroy.argtypes = [ct.c_void_p]
lib.bpf_module_license.restype = ct.c_char_p
lib.bpf_module_license.argtypes = [ct.c_void_p]
lib.bpf_module_kern_version.restype = ct.c_uint
lib.bpf_module_kern_version.argtypes = [ct.c_void_p]
lib.bpf_num_functions.restype = ct.c_ulonglong
lib.bpf_num_functions.argtypes = [ct.c_void_p]
lib.bpf_function_name.restype = ct.c_char_p
lib.bpf_function_name.argtypes = [ct.c_void_p, ct.c_ulonglong]
lib.bpf_function_start.restype = ct.c_void_p
lib.bpf_function_start.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_function_size.restype = ct.c_size_t
lib.bpf_function_size.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_id.restype = ct.c_ulonglong
lib.bpf_table_id.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_fd.restype = ct.c_int
lib.bpf_table_fd.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_type_id.restype = ct.c_int
lib.bpf_table_type_id.argtypes = [ct.c_void_p, ct.c_ulonglong]
lib.bpf_table_max_entries_id.restype = ct.c_ulonglong
lib.bpf_table_max_entries_id.argtypes = [ct.c_void_p, ct.c_ulonglong]
lib.bpf_table_key_desc.restype = ct.c_char_p
lib.bpf_table_key_desc.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_leaf_desc.restype = ct.c_char_p
lib.bpf_table_leaf_desc.argtypes = [ct.c_void_p, ct.c_char_p]
lib.bpf_table_key_snprintf.restype = ct.c_int
lib.bpf_table_key_snprintf.argtypes = [ct.c_void_p, ct.c_ulonglong,
ct.c_char_p, ct.c_ulonglong, ct.c_void_p]
lib.bpf_table_leaf_snprintf.restype = ct.c_int
lib.bpf_table_leaf_snprintf.argtypes = [ct.c_void_p, ct.c_ulonglong,
ct.c_char_p, ct.c_ulonglong, ct.c_void_p]
lib.bpf_table_key_sscanf.restype = ct.c_int
lib.bpf_table_key_sscanf.argtypes = [ct.c_void_p, ct.c_ulonglong,
ct.c_char_p, ct.c_void_p]
lib.bpf_table_leaf_sscanf.restype = ct.c_int
lib.bpf_table_leaf_sscanf.argtypes = [ct.c_void_p, ct.c_ulonglong,
ct.c_char_p, ct.c_void_p]
# keep in sync with libbpf.h
lib.bpf_get_next_key.restype = ct.c_int
lib.bpf_get_next_key.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p]
lib.bpf_lookup_elem.restype = ct.c_int
lib.bpf_lookup_elem.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p]
lib.bpf_update_elem.restype = ct.c_int
lib.bpf_update_elem.argtypes = [ct.c_int, ct.c_void_p, ct.c_void_p,
ct.c_ulonglong]
lib.bpf_delete_elem.restype = ct.c_int
lib.bpf_delete_elem.argtypes = [ct.c_int, ct.c_void_p]
lib.bpf_open_raw_sock.restype = ct.c_int
lib.bpf_open_raw_sock.argtypes = [ct.c_char_p]
lib.bpf_attach_socket.restype = ct.c_int
lib.bpf_attach_socket.argtypes = [ct.c_int, ct.c_int]
lib.bpf_prog_load.restype = ct.c_int
lib.bpf_prog_load.argtypes = [ct.c_int, ct.c_void_p, ct.c_size_t,
ct.c_char_p, ct.c_uint, ct.c_char_p, ct.c_uint]
lib.bpf_attach_kprobe.restype = ct.c_void_p
_CB_TYPE = ct.CFUNCTYPE(None, ct.py_object, ct.c_int,
ct.c_ulonglong, ct.POINTER(ct.c_ulonglong))
_RAW_CB_TYPE = ct.CFUNCTYPE(None, ct.py_object, ct.c_void_p, ct.c_int)
lib.bpf_attach_kprobe.argtypes = [ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_int,
ct.c_int, ct.c_int, _CB_TYPE, ct.py_object]
lib.bpf_detach_kprobe.restype = ct.c_int
lib.bpf_detach_kprobe.argtypes = [ct.c_char_p]
lib.bpf_attach_uprobe.restype = ct.c_void_p
lib.bpf_attach_uprobe.argtypes = [ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_int,
ct.c_int, ct.c_int, _CB_TYPE, ct.py_object]
lib.bpf_detach_uprobe.restype = ct.c_int
lib.bpf_detach_uprobe.argtypes = [ct.c_char_p]
lib.bpf_attach_tracepoint.restype = ct.c_void_p
lib.bpf_attach_tracepoint.argtypes = [ct.c_int, ct.c_char_p, ct.c_char_p, ct.c_int,
ct.c_int, ct.c_int, _CB_TYPE, ct.py_object]
lib.bpf_detach_tracepoint.restype = ct.c_int
lib.bpf_detach_tracepoint.argtypes = [ct.c_char_p, ct.c_char_p]
lib.bpf_open_perf_buffer.restype = ct.c_void_p
lib.bpf_open_perf_buffer.argtypes = [_RAW_CB_TYPE, ct.py_object, ct.c_int, ct.c_int]
lib.perf_reader_poll.restype = ct.c_int
lib.perf_reader_poll.argtypes = [ct.c_int, ct.POINTER(ct.c_void_p), ct.c_int]
lib.perf_reader_free.restype = None
lib.perf_reader_free.argtypes = [ct.c_void_p]
lib.perf_reader_fd.restype = int
lib.perf_reader_fd.argtypes = [ct.c_void_p]
# bcc symbol helpers
class bcc_symbol(ct.Structure):
_fields_ = [
('name', ct.c_char_p),
('module', ct.c_char_p),
('offset', ct.c_ulonglong),
]
lib.bcc_procutils_which_so.restype = ct.c_char_p
lib.bcc_procutils_which_so.argtypes = [ct.c_char_p]
lib.bcc_resolve_symname.restype = ct.c_int
lib.bcc_resolve_symname.argtypes = [
ct.c_char_p, ct.c_char_p, ct.c_ulonglong, ct.POINTER(bcc_symbol)]
lib.bcc_symcache_new.restype = ct.c_void_p
lib.bcc_symcache_new.argtypes = [ct.c_int]
lib.bcc_symcache_resolve.restype = ct.c_int
lib.bcc_symcache_resolve.argtypes = [ct.c_void_p, ct.c_ulonglong, ct.POINTER(bcc_symbol)]
lib.bcc_symcache_resolve_name.restype = ct.c_int
lib.bcc_symcache_resolve_name.argtypes = [
ct.c_void_p, ct.c_char_p, ct.POINTER(ct.c_ulonglong)]
lib.bcc_symcache_refresh.restype = None
lib.bcc_symcache_refresh.argtypes = [ct.c_void_p]
| 44.789855 | 89 | 0.781751 |
fb27304866f6957fa8f22810b6914ec2f7b22492
| 657 |
py
|
Python
|
picture_bed/models.py
|
TechDailyGroup/news_app_web_api
|
f1255c70dfd3eaa49e77b67fb1392aa7f8c4e7fb
|
[
"Apache-2.0"
] | null | null | null |
picture_bed/models.py
|
TechDailyGroup/news_app_web_api
|
f1255c70dfd3eaa49e77b67fb1392aa7f8c4e7fb
|
[
"Apache-2.0"
] | 6 |
2021-06-02T02:04:38.000Z
|
2022-03-12T00:34:28.000Z
|
picture_bed/models.py
|
TechDailyGroup/news_app_web_api
|
f1255c70dfd3eaa49e77b67fb1392aa7f8c4e7fb
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
class Picture(models.Model):
    """An image uploaded by a user, stored under a per-user directory."""

    def user_directory_path(instance, filename):
        # Legacy path builder kept so old migrations keep resolving.
        # BUG FIX: the previous body called the bare name
        # ``user_pictures_path``, which is a class attribute, not a global,
        # so any call raised NameError. Qualify the lookup via the class.
        return Picture.user_pictures_path(instance, filename)

    def user_pictures_path(instance, filename):
        # file will be uploaded to MEDIA_ROOT/user_<username>/pictures/<filename>
        # (the old comment claimed pictures/user_<username>/..., which did not
        # match the format string below)
        return 'user_{0}/pictures/{1}'.format(instance.user.username, filename)

    picture = models.ImageField(upload_to=user_pictures_path)
    user = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        # Use the picture URL as the human-readable representation.
        return self.picture.url
| 34.578947 | 81 | 0.730594 |
fd5a05d22270a56ae61471427df8eb8461a6243a
| 1,357 |
py
|
Python
|
vang/tfs/get_projects.py
|
mattiasl/scripts
|
e9245ce432b0dd5743506654ada52e017d0b6be0
|
[
"Apache-2.0"
] | 6 |
2018-01-31T09:59:18.000Z
|
2020-06-09T08:55:22.000Z
|
vang/tfs/get_projects.py
|
mattiasl/scripts
|
e9245ce432b0dd5743506654ada52e017d0b6be0
|
[
"Apache-2.0"
] | null | null | null |
vang/tfs/get_projects.py
|
mattiasl/scripts
|
e9245ce432b0dd5743506654ada52e017d0b6be0
|
[
"Apache-2.0"
] | 2 |
2018-11-19T09:56:46.000Z
|
2020-06-08T10:53:11.000Z
|
#!/usr/bin/env python3
import argparse
from sys import argv
from vang.tfs.api import call
def get_projects(organisations, project_specs=False, names=False):
    """Return TFS projects for the given organisations.

    :param organisations: iterable of TFS organisation slugs
    :param project_specs: if True, return 'organisation/project' strings
    :param names: if True, return only the project names
    :return: list of (organisation, project-dict) tuples by default, or a
        list of strings when ``names`` or ``project_specs`` is set
    """
    if not organisations:
        return []

    found = []
    for org in organisations:
        response = call(f'/{org}/_apis/projects?api-version=3.2')
        for project in response['value']:
            found.append((org, project))

    if names:
        return [project['name'] for _, project in found]
    if project_specs:
        return ['{}/{}'.format(org, project["name"]) for org, project in found]
    return found
def parse_args(args):
    """Build and apply the command-line parser for listing TFS projects.

    :param args: list of raw argument strings (excluding the program name)
    :return: argparse.Namespace with organisations, names, project_specs
    """
    parser = argparse.ArgumentParser(description='Get TFS projects')
    parser.add_argument(
        'organisations', nargs='+', help='TFS organisations, e.g organisation')
    # -n and -p are alternative output formats, so they are mutually exclusive.
    exclusive = parser.add_mutually_exclusive_group(required=False)
    exclusive.add_argument(
        '-n', '--names', action='store_true', help='Get only project names')
    exclusive.add_argument(
        '-p',
        '--project_specs',
        action='store_true',
        help='Get only organisation/project')
    return parser.parse_args(args)
def main(organisations, project_specs, names):
    """Print every project returned for the given organisations,
    one per line, in the format selected by the flags."""
    projects = get_projects(organisations, project_specs, names)
    for project in projects:
        print(project)


if __name__ == '__main__':  # pragma: no cover
    main(**parse_args(argv[1:]).__dict__)
| 27.14 | 79 | 0.638909 |
d9576879dd2d1afb75a90f0a5b8cbbece85b9a6d
| 2,819 |
py
|
Python
|
statsmodels/tools/transform_model.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 6,931 |
2015-01-01T11:41:55.000Z
|
2022-03-31T17:03:24.000Z
|
statsmodels/tools/transform_model.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 6,137 |
2015-01-01T00:33:45.000Z
|
2022-03-31T22:53:17.000Z
|
statsmodels/tools/transform_model.py
|
CCHiggins/statsmodels
|
300b6fba90c65c8e94b4f83e04f7ae1b0ceeac2e
|
[
"BSD-3-Clause"
] | 2,608 |
2015-01-02T21:32:31.000Z
|
2022-03-31T07:38:30.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 27 13:23:24 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
class StandardizeTransform(object):
    """class to reparameterize a model for standardized exog

    Parameters
    ----------
    data : array_like
        data that is standardized along axis=0
    ddof : None or int
        degrees of freedom for calculation of standard deviation.
        default is 1, in contrast to numpy.std
    const_idx : None or int
        If None, then the presence of a constant is detected if the standard
        deviation of a column is **equal** to zero. A constant column is
        not transformed. If this is an integer, then the corresponding column
        will not be transformed.
    demean : bool, default is True
        If demean is true, then the data will be demeaned, otherwise it will
        only be rescaled.

    Notes
    -----
    Warning: Not all options are tested and it is written for one use case.
    API changes are expected.

    This can be used to transform only the design matrix, exog, in a model,
    which is required in some discrete models when the endog cannot be rescaled
    or demeaned.
    The transformation is full rank and does not drop the constant.
    """

    def __init__(self, data, ddof=1, const_idx=None, demean=True):
        data = np.asarray(data)
        self.mean = data.mean(0)
        # BUG FIX: ddof was previously hard-coded to 1 (``data.std(0, ddof=1)``),
        # silently ignoring the user-supplied ``ddof`` argument.
        self.scale = data.std(0, ddof=ddof)

        # do not transform a constant column
        if const_idx is None:
            const_idx = np.nonzero(self.scale == 0)[0]
            if len(const_idx) == 0:
                const_idx = 'n'
            else:
                # NOTE(review): int() assumes at most one zero-variance
                # column; several constant columns would raise here — confirm.
                const_idx = int(const_idx)

        if const_idx != 'n':
            # leave the constant column unchanged: shift 0, scale 1
            self.mean[const_idx] = 0
            self.scale[const_idx] = 1

        if demean is False:
            # rescale only; ``transform`` checks for ``mean is None``.
            # NOTE(review): ``transform_params`` still uses ``self.mean`` when
            # a constant is present, which would fail in this mode — confirm.
            self.mean = None
        self.const_idx = const_idx

    def transform(self, data):
        """standardize the data using the stored transformation
        """
        # could use scipy.stats.zscore instead
        if self.mean is None:
            return np.asarray(data) / self.scale
        else:
            return (np.asarray(data) - self.mean) / self.scale

    def transform_params(self, params):
        """Transform parameters of the standardized model to the original model

        Parameters
        ----------
        params : ndarray
            parameters estimated with the standardized model

        Returns
        -------
        params_new : ndarray
            parameters transformed to the parameterization of the original
            model
        """
        params_new = params / self.scale
        if self.const_idx != 'n':
            # absorb the shift introduced by demeaning into the constant's
            # parameter so predictions on the original data are unchanged
            params_new[self.const_idx] -= (params_new * self.mean).sum()
        return params_new

    __call__ = transform
| 29.364583 | 79 | 0.608372 |
4962bd44eb6517b3ce1dca4bffa3eb44b0712549
| 490 |
py
|
Python
|
sphinx/source/docs/user_guide/source_examples/concepts_charts.py
|
rothnic/bokeh
|
8da5e16b260a75caa8e7ef4caf215bb93dd784db
|
[
"BSD-3-Clause"
] | 1 |
2015-07-17T13:57:01.000Z
|
2015-07-17T13:57:01.000Z
|
sphinx/source/docs/user_guide/source_examples/concepts_charts.py
|
rothnic/bokeh
|
8da5e16b260a75caa8e7ef4caf215bb93dd784db
|
[
"BSD-3-Clause"
] | null | null | null |
sphinx/source/docs/user_guide/source_examples/concepts_charts.py
|
rothnic/bokeh
|
8da5e16b260a75caa8e7ef4caf215bb93dd784db
|
[
"BSD-3-Clause"
] | 1 |
2016-03-18T03:01:59.000Z
|
2016-03-18T03:01:59.000Z
|
# Example: build an iris scatter chart with bokeh.charts and write it to HTML.
# NOTE(review): ``bokeh.charts`` is a high-level charting API; availability
# depends on the installed bokeh version — confirm against the docs build.
from bokeh.charts import Scatter, output_file, show
# prepare some data, a Pandas GroupBy object in this case
from bokeh.sampledata.iris import flowers
grouped = flowers[["petal_length", "petal_width", "species"]].groupby("species")
# create a scatter chart (one glyph color per species group)
p = Scatter(grouped, title="iris data", width=400, height=400,
            xlabel="petal length", ylabel="petal width", legend='top_left')
# specify how to output the plot(s)
output_file("foo.html")
# display the figure (writes foo.html and opens it)
show(p)
| 30.625 | 80 | 0.730612 |
8c22c1c4e9d862b7f3eabb75ca462bc526af8cd5
| 3,133 |
py
|
Python
|
8_Concurrency/2_asyncio/1_generators.py
|
hauntarl/real-python
|
6ffb535648bf5c79c90e2ed7def842078bc7807f
|
[
"MIT"
] | 2 |
2020-12-15T18:11:00.000Z
|
2021-03-01T11:43:16.000Z
|
8_Concurrency/2_asyncio/1_generators.py
|
hauntarl/real_python
|
6ffb535648bf5c79c90e2ed7def842078bc7807f
|
[
"MIT"
] | null | null | null |
8_Concurrency/2_asyncio/1_generators.py
|
hauntarl/real_python
|
6ffb535648bf5c79c90e2ed7def842078bc7807f
|
[
"MIT"
] | null | null | null |
"""
asyncio is a library built into python to write concurrent code using the
async/await syntax.
- concurrency is not parallelism: things that are concurrent are tasks or items
that could potentially run in parallel but may not necessarily run in parallel.
It is often a perfect fit for IO-bound and high-level structured network code.
- IO-bound: if your application's performance mainly depends IO and the reason
its slow is because of IO then it is called an IO-bound app. eg. reading to a
file system, reading to a database, talking to a website.
If your code is CPU-bound, consider using the multiprocessing library instead.
Multiprocessing involves creation of new processes to cater to your needs.
If your application is doing a lot of cpu intensive tasks eg. calculating lots
of different prime numbers, then it can leverage multiple processes to perform
the operation.
- asyncio: its only one process and only one thread is doing all the work.
How it works will be explored further in this tutorial.
"""
def odds(start, stop):
    """Yield odd numbers from ``start`` through ``stop`` (inclusive), two apart.

    Calling this function does not run the body; it returns a generator
    object. Each ``next()`` call resumes execution up to the next ``yield``,
    hands the value to the caller, and pauses again. When the sequence is
    exhausted, the generator raises StopIteration to signal the end.
    ``list()`` and ``for`` loops handle that exception automatically.

    Note: ``start`` is assumed odd; values step by 2 regardless.
    """
    current = start
    while current <= stop:
        yield current
        current += 2
# Demonstrate the three common ways to consume a generator.
g1 = odds(3, 15)
print(g1)  # prints the generator object, not the values
print(next(g1))  # one way of fetching the values, one at a time
g2 = odds(3, 15)
print(list(g2))  # will store all the values in a list
# when you call list() on a generator, it internally calls next() until the
# generator exhausts. list() internally handles the StopIteration exception.
# NOTE: one risk with using list() on generator objects is that generators
# can produce infinite values; if you are to store them all in a list, you
# probably will run out of memory and the program will crash.
g3 = odds(3, 15)
odd_nums = []
for o in g3:
    if o == 13:
        break
    odd_nums.append(o)
print(odd_nums)
# another way of iterating over all the values of a generator. Here even if
# the generator is infinite, we have the option to introduce our own
# breakpoint to avoid the system from crashing.
def main():
    """Show two more ways to materialize a generator: a list comprehension
    and the tuple() constructor."""
    # consume odds(7, 21) through a list comprehension
    first_batch = [value for value in odds(7, 21)]
    print(first_batch)

    # consume odds(21, 29) by building a tuple
    second_batch = tuple(odds(21, 29))
    print(second_batch)


if __name__ == '__main__':
    print()
    main()
| 36.858824 | 80 | 0.736355 |
444be26ed37e910b00ac64fa890d4ba9ee8e3093
| 148 |
py
|
Python
|
setup.py
|
ITD27M01/cloudsyaml
|
7607aea47e91242ba8b0c69d8df208781eb313e5
|
[
"MIT"
] | null | null | null |
setup.py
|
ITD27M01/cloudsyaml
|
7607aea47e91242ba8b0c69d8df208781eb313e5
|
[
"MIT"
] | 1 |
2021-06-30T09:15:07.000Z
|
2021-06-30T12:20:20.000Z
|
setup.py
|
ITD27M01/cloudsyaml
|
7607aea47e91242ba8b0c69d8df208781eb313e5
|
[
"MIT"
] | null | null | null |
import setuptools
# NOTE(review): the multiprocessing import appears to be a workaround kept
# for an old packaging quirk — it is otherwise unused; confirm before removal.
try:
    import multiprocessing
except ImportError:
    pass
# All real package metadata lives in setup.cfg; pbr reads it at build time.
setuptools.setup(
    setup_requires=['pbr>=2.0.0'],
    pbr=True
)
| 13.454545 | 34 | 0.695946 |
baa266bb34a8176d920784291835d9c13dbafe05
| 26 |
py
|
Python
|
myven/lib/python3.8/site-packages/ansible/modules/cloud/ovirt/_ovirt_external_providers.py
|
baltham/dne-dna-code
|
4a13309a790a670d2f07e635c9264a0c29976c6a
|
[
"MIT"
] | 1 |
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
ansible/modules/cloud/ovirt/_ovirt_external_providers.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ansible/modules/cloud/ovirt/_ovirt_external_providers.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
ovirt_external_provider.py
| 26 | 26 | 0.961538 |
2f279fe862930db7c1aab4a78c6dab1ce2a4370b
| 1,614 |
py
|
Python
|
setup.py
|
cristiandobre-rinf/nwebsocket
|
22eed21419df8632766137f70dda2abc230d6684
|
[
"MIT"
] | null | null | null |
setup.py
|
cristiandobre-rinf/nwebsocket
|
22eed21419df8632766137f70dda2abc230d6684
|
[
"MIT"
] | null | null | null |
setup.py
|
cristiandobre-rinf/nwebsocket
|
22eed21419df8632766137f70dda2abc230d6684
|
[
"MIT"
] | null | null | null |
"""Setuptools entry point."""
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
dirname = os.path.dirname(__file__)
readme_filename = os.path.join(dirname, 'README.rst')
description = 'WebSocket client without async'
with open(readme_filename, 'r', encoding='utf-8') as fh:
long_description = fh.read()
setup(name='nwebsocket',
version='1.0.0',
description=description,
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://github.com/cristidbr-adapta/nwebsocket',
author='Cristian Dobre',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Framework :: Jupyter',
'Framework :: Jupyter :: JupyterLab',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
packages=['nwebsocket'],
install_requires=[
'curio',
'wsproto'
],
tests_require=['pytest', 'pytest-asyncio'],
zip_safe=False,
python_requires=">=3.7",
)
| 31.647059 | 59 | 0.610285 |
100566848c4425a7f9e93cf23abc43b840644cfe
| 2,427 |
py
|
Python
|
pybuildkite/artifacts.py
|
savithakj/pybuildkite
|
6aa5a71caadc092dbd9515eae4523ebf2429c054
|
[
"BSD-2-Clause"
] | null | null | null |
pybuildkite/artifacts.py
|
savithakj/pybuildkite
|
6aa5a71caadc092dbd9515eae4523ebf2429c054
|
[
"BSD-2-Clause"
] | null | null | null |
pybuildkite/artifacts.py
|
savithakj/pybuildkite
|
6aa5a71caadc092dbd9515eae4523ebf2429c054
|
[
"BSD-2-Clause"
] | null | null | null |
from pybuildkite.client import Client
class Artifacts(Client):
    """
    Artifacts operations for the Buildkite API

    NOTE(review): the docstrings below describe what each method's URL
    actually addresses; the names ``list_artifacts_for_job`` and
    ``list_artifacts_for_build`` appear to be swapped relative to their
    endpoints — verify against the Buildkite API before renaming, since
    renaming would break callers.
    """

    def __init__(self, client, base_url):
        """
        Construct the class

        :param client: API Client
        :param base_url: Base Url
        """
        self.client = client
        # All endpoints below are rooted at a specific build of a pipeline.
        self.path = base_url + "organizations/{}/pipelines/{}/builds/{}/"

    def list_artifacts_for_job(self, organization, pipeline, build):
        """
        Return a paginated list of a build's artifacts across all of its
        jobs (the URL carries no job id).

        :param organization: organization slug
        :param pipeline: pipeline slug
        :param build: build number
        :return: paginated list of artifacts for the build
        """
        url = self.path + "artifacts/"
        return self.client.get(url.format(organization, pipeline, build))

    def list_artifacts_for_build(self, organization, pipeline, build, job):
        """
        Return a paginated list of a single job's artifacts (the URL is
        scoped to ``jobs/{job}``).

        :param organization: organization slug
        :param pipeline: pipeline slug
        :param build: build number
        :param job: job id
        :return: paginated list of artifacts for the job
        """
        url = self.path + "jobs/{}/artifacts/"
        return self.client.get(url.format(organization, pipeline, build, job))

    def get_artifact(self, organization, pipeline, build, job, artifact):
        """
        Return a single artifact's metadata.

        :param organization: organization slug
        :param pipeline: pipeline slug
        :param build: build number
        :param job: job id
        :param artifact: artifact id
        :return: the artifact
        """
        url = self.path + "jobs/{}/artifacts/{}/"
        return self.client.get(url.format(organization, pipeline, build, job, artifact))

    def download_artifact(self, organization, pipeline, build, job, artifact):
        """
        Return a URL for downloading an artifact.

        :param organization: organization slug
        :param pipeline: pipeline slug
        :param build: build number
        :param job: job id
        :param artifact: artifact id
        :return: a URL for downloading the artifact
        """
        url = self.path + "jobs/{}/artifacts/{}/download/"
        return self.client.get(url.format(organization, pipeline, build, job, artifact))
| 34.671429 | 88 | 0.625464 |
e1fcfe1a006dc3ab67a2318c7cdd82291409f047
| 212 |
py
|
Python
|
vega/algorithms/nas/sr_ea/__init__.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 12 |
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
vega/algorithms/nas/sr_ea/__init__.py
|
Lzc06/vega
|
852d2f57e21caed11473ddc96397124561eacf8a
|
[
"MIT"
] | 3 |
2021-03-31T20:15:40.000Z
|
2022-02-09T23:50:46.000Z
|
built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/algorithms/nas/sr_ea/__init__.py
|
Huawei-Ascend/modelzoo
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
[
"Apache-2.0"
] | 2 |
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
"""SR-EA NAS package: search, codec and mutation; trainer callback is
backend-specific and only imported for the PyTorch backend."""
from .sr_random import SRRandom
from .sr_ea_codec import SRCodec
from .sr_mutate import SRMutate

import os

# BUG FIX: os.environ['BACKEND_TYPE'] raised KeyError whenever the variable
# was unset, making the whole package unimportable; use .get() so a missing
# BACKEND_TYPE simply skips the backend-specific callback import.
if os.environ.get('BACKEND_TYPE') == 'PYTORCH':
    from .sr_ea_trainer_callback import SREATrainerCallback
| 26.5 | 59 | 0.806604 |
9d799ad50a90d018bbf730542367429d6a99b774
| 2,747 |
py
|
Python
|
oauth2client/contrib/django_util/models.py
|
anleo1000/oauth2client
|
64969bbaa53af4b9b4eda089c7d3a95976b6bb13
|
[
"Apache-2.0"
] | null | null | null |
oauth2client/contrib/django_util/models.py
|
anleo1000/oauth2client
|
64969bbaa53af4b9b4eda089c7d3a95976b6bb13
|
[
"Apache-2.0"
] | null | null | null |
oauth2client/contrib/django_util/models.py
|
anleo1000/oauth2client
|
64969bbaa53af4b9b4eda089c7d3a95976b6bb13
|
[
"Apache-2.0"
] | 1 |
2019-11-13T12:39:17.000Z
|
2019-11-13T12:39:17.000Z
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes used for the Django ORM storage."""
import base64
import pickle
from django.db import models
from django.utils import encoding
import jsonpickle
import oauth2client
class CredentialsField(models.Field):
    """Django ORM field for storing OAuth2 Credentials.

    Values are serialized with jsonpickle and base64-encoded before being
    written to a binary column; legacy rows written with pickle are still
    readable (see ``to_python``).
    """

    def __init__(self, *args, **kwargs):
        # Default to a nullable column so rows without credentials are valid.
        if 'null' not in kwargs:
            kwargs['null'] = True
        super(CredentialsField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored as raw bytes; serialization happens in get_prep_value.
        return 'BinaryField'

    def from_db_value(self, value, expression, connection, context=None):
        """Overrides ``models.Field`` method. This converts the value
        returned from the database to an instance of this class.
        """
        return self.to_python(value)

    def to_python(self, value):
        """Overrides ``models.Field`` method. This is used to convert
        bytes (from serialization etc) to an instance of this class"""
        if value is None:
            return None
        elif isinstance(value, oauth2client.client.Credentials):
            # Already deserialized — pass through unchanged.
            return value
        else:
            try:
                # Current format: base64-encoded jsonpickle.
                return jsonpickle.decode(
                    base64.b64decode(encoding.smart_bytes(value)).decode())
            except ValueError:
                # Legacy format: base64-encoded pickle.
                # NOTE(review): unpickling database content is only safe if
                # the DB is trusted — confirm threat model before keeping.
                return pickle.loads(
                    base64.b64decode(encoding.smart_bytes(value)))

    def get_prep_value(self, value):
        """Overrides ``models.Field`` method. This is used to convert
        the value from an instances of this class to bytes that can be
        inserted into the database.
        """
        if value is None:
            return None
        else:
            # Inverse of to_python's primary path: jsonpickle then base64.
            return encoding.smart_text(
                base64.b64encode(jsonpickle.encode(value).encode()))

    def value_to_string(self, obj):
        """Convert the field value from the provided model to a string.

        Used during model serialization.

        Args:
            obj: db.Model, model object

        Returns:
            string, the serialized field value
        """
        # NOTE(review): Field._get_val_from_obj was removed in Django 2.0
        # (replaced by value_from_object) — confirm the targeted Django
        # version before relying on this method.
        value = self._get_val_from_obj(obj)
        return self.get_prep_value(value)
26b89e7f9d8707ce54a0bb1a618ab4d1517dd34d
| 6,005 |
py
|
Python
|
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_elastic_pool_database_activities_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 2 |
2019-08-23T21:14:00.000Z
|
2021-09-07T18:32:34.000Z
|
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_elastic_pool_database_activities_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 4 |
2019-04-17T17:57:49.000Z
|
2020-04-24T21:11:22.000Z
|
sdk/sql/azure-mgmt-sql/azure/mgmt/sql/aio/operations/_elastic_pool_database_activities_operations.py
|
beltr0n/azure-sdk-for-python
|
2f7fb8bee881b0fc0386a0ad5385755ceedd0453
|
[
"MIT"
] | 1 |
2019-04-05T18:17:43.000Z
|
2019-04-05T18:17:43.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# Auto-generated Azure SDK operations class (AutoRest); edits here will be
# lost on regeneration — prefer changing the generator inputs.
class ElasticPoolDatabaseActivitiesOperations:
    """ElasticPoolDatabaseActivitiesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.sql.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list_by_elastic_pool(
        self,
        resource_group_name: str,
        server_name: str,
        elastic_pool_name: str,
        **kwargs
    ) -> AsyncIterable["models.ElasticPoolDatabaseActivityListResult"]:
        """Returns activity on databases inside of an elastic pool.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param server_name: The name of the server.
        :type server_name: str
        :param elastic_pool_name: The name of the elastic pool.
        :type elastic_pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ElasticPoolDatabaseActivityListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.sql.models.ElasticPoolDatabaseActivityListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.ElasticPoolDatabaseActivityListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2014-04-01"
        accept = "application/json"

        # Builds either the first-page request (full URL from metadata) or a
        # follow-up request from the service-provided next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_elastic_pool.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serverName': self._serialize.url("server_name", server_name, 'str'),
                    'elasticPoolName': self._serialize.url("elastic_pool_name", elastic_pool_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page; returns (continuation token, items) as
        # expected by AsyncItemPaged. Continuation is None: this API does not
        # return a nextLink, so paging stops after the first page.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ElasticPoolDatabaseActivityListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, AsyncList(list_of_elem)

        # Sends the request for one page and maps HTTP errors to exceptions.
        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_elastic_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/elasticPools/{elasticPoolName}/elasticPoolDatabaseActivity'}  # type: ignore
| 49.221311 | 233 | 0.671107 |
985ea10036da84b513170c08e0d739f6845b797a
| 33,396 |
py
|
Python
|
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
|
rheaparekh/azure-cli
|
c808ba40b95235a5216fa42e91f4937c085b53cf
|
[
"MIT"
] | 1 |
2021-04-22T09:20:56.000Z
|
2021-04-22T09:20:56.000Z
|
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
|
rheaparekh/azure-cli
|
c808ba40b95235a5216fa42e91f4937c085b53cf
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/backup/custom_wl.py
|
rheaparekh/azure-cli
|
c808ba40b95235a5216fa42e91f4937c085b53cf
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from uuid import uuid4
from datetime import datetime, timedelta, timezone
# pylint: disable=import-error
# pylint: disable=broad-except
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from knack.log import get_logger
from azure.mgmt.recoveryservicesbackup.models import AzureVMAppContainerProtectionContainer, \
AzureWorkloadBackupRequest, ProtectedItemResource, AzureRecoveryServiceVaultProtectionIntent, TargetRestoreInfo, \
RestoreRequestResource, BackupRequestResource, ProtectionIntentResource, SQLDataDirectoryMapping, \
ProtectionContainerResource, AzureWorkloadSAPHanaRestoreRequest, AzureWorkloadSQLRestoreRequest, \
AzureWorkloadSAPHanaPointInTimeRestoreRequest, AzureWorkloadSQLPointInTimeRestoreRequest, \
AzureVmWorkloadSAPHanaDatabaseProtectedItem, AzureVmWorkloadSQLDatabaseProtectedItem
from azure.cli.core.util import CLIError
from azure.cli.command_modules.backup._validators import datetime_type
from azure.cli.command_modules.backup._client_factory import backup_workload_items_cf, \
protectable_containers_cf, backup_protection_containers_cf, backup_protected_items_cf, recovery_points_crr_cf
import azure.cli.command_modules.backup.custom_help as cust_help
import azure.cli.command_modules.backup.custom_common as common
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, ValidationError
# Recovery Services fabric name used by all workload backup operations.
fabric_name = "Azure"
logger = get_logger(__name__)
# Mapping of workload type
# Keys accept both CLI aliases (MSSQL, SAPHANA, SAPASE) and native service
# names; values are the canonical workload-type names expected by the service.
workload_type_map = {'MSSQL': 'SQLDataBase',
                     'SAPHANA': 'SAPHanaDatabase',
                     'SQLDataBase': 'SQLDataBase',
                     'SAPHanaDatabase': 'SAPHanaDatabase',
                     'SAPASE': 'SAPAseDatabase'}
# Mapping of module name
module_map = {'sqldatabase': 'sql_database',
              'saphanadatabase': 'sap_hana_database',
              'sapasedatabase': 'sap_ase_database'}
# Mapping of attribute name
attr_map = {'sqldatabase': 'SQLDatabase',
            'saphanadatabase': 'SAPHanaDatabase',
            'sapasedatabase': 'SAPAseDatabase'}
# Maps protectable-item-type aliases (including legacy HANA spellings) to the
# canonical protectable item type names.
protectable_item_type_map = {'SQLDatabase': 'SQLDataBase',
                             'HANADataBase': 'SAPHanaDatabase',
                             'SAPHanaDatabase': 'SAPHanaDatabase',
                             'HANAInstance': 'SAPHanaSystem',
                             'SAPHanaSystem': 'SAPHanaSystem',
                             'SQLInstance': 'SQLInstance',
                             'SQLAG': 'SQLAG'}
def show_wl_policy(client, resource_group_name, vault_name, name):
    """Fetch a single backup policy from the vault and return it wrapped in a list."""
    policy = client.get(vault_name, resource_group_name, name)
    return [policy]
def list_wl_policies(client, resource_group_name, vault_name, workload_type, backup_management_type):
    """List backup policies in the vault filtered by workload and backup-management type."""
    if workload_type is None:
        raise RequiredArgumentMissingError(
            """
            Workload type is required for Azure Workload. Use --workload-type.
            """)
    if backup_management_type is None:
        raise CLIError(
            """
            Backup Management Type needs to be specified for Azure Workload.
            """)
    resolved_workload = _check_map(workload_type, workload_type_map)
    query_filter = cust_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': resolved_workload})
    paged_policies = client.list(vault_name, resource_group_name, query_filter)
    return cust_help.get_list_from_paged_response(paged_policies)
def list_protectable_containers(cmd, resource_group_name, vault_name, container_type="AzureWorkload"):
    """Enumerate containers in the vault that can still be registered for workload backup."""
    query_filter = cust_help.get_filter_string({'backupManagementType': container_type})
    containers_client = protectable_containers_cf(cmd.cli_ctx)
    paged_containers = containers_client.list(vault_name, resource_group_name, fabric_name, query_filter)
    return cust_help.get_list_from_paged_response(paged_containers)
def register_wl_container(cmd, client, vault_name, resource_group_name, workload_type, resource_id, container_type):
    """Register the VM identified by resource_id as a workload container in the vault."""
    if not cust_help.is_id(resource_id):
        raise CLIError(
            """
            Resource ID is not a valid one.
            """)
    workload_type = _check_map(workload_type, workload_type_map)
    # Default to the last ARM ID segment; replaced below by the native name of
    # the matching protectable container, when one is found.
    container_name = resource_id.split('/')[-1]
    target_resource_id = cust_help.get_resource_id(resource_id)
    for candidate in list_protectable_containers(cmd, resource_group_name, vault_name):
        if cust_help.get_resource_id(candidate.properties.container_id) == target_resource_id:
            container_name = candidate.name
            break
    if not cust_help.is_native_name(container_name):
        raise CLIError(
            """
            Container unavailable or already registered.
            """)
    container_properties = AzureVMAppContainerProtectionContainer(
        backup_management_type=container_type,
        source_resource_id=resource_id,
        workload_type=workload_type)
    request = ProtectionContainerResource(properties=container_properties)
    # Trigger register and wait for completion
    raw_result = client.register(vault_name, resource_group_name, fabric_name, container_name, request, raw=True)
    return cust_help.track_register_operation(cmd.cli_ctx, raw_result, vault_name, resource_group_name, container_name)
def re_register_wl_container(cmd, client, vault_name, resource_group_name, workload_type,
                             container_name, container_type):
    """Re-register an already-registered workload container (operation_type='Reregister')."""
    workload_type = _check_map(workload_type, workload_type_map)
    if not cust_help.is_native_name(container_name):
        raise CLIError(
            """
            Container name passed cannot be a friendly name.
            Please pass a native container name.
            """)
    containers_client = backup_protection_containers_cf(cmd.cli_ctx)
    registered = common.list_containers(containers_client, resource_group_name, vault_name, container_type)
    # Look up the source resource id of the container being re-registered.
    source_resource_id = next(
        (c.properties.source_resource_id for c in registered if c.name == container_name), None)
    if not source_resource_id:
        raise CLIError(
            """
            No such registered container exists.
            """)
    container_properties = AzureVMAppContainerProtectionContainer(
        backup_management_type=container_type,
        workload_type=workload_type,
        operation_type='Reregister',
        source_resource_id=source_resource_id)
    request = ProtectionContainerResource(properties=container_properties)
    # Trigger register and wait for completion
    raw_result = client.register(vault_name, resource_group_name, fabric_name, container_name, request, raw=True)
    return cust_help.track_register_operation(cmd.cli_ctx, raw_result, vault_name, resource_group_name, container_name)
def unregister_wl_container(cmd, client, vault_name, resource_group_name, container_name):
    """Unregister a workload container from the vault; requires the native container name."""
    if not cust_help.is_native_name(container_name):
        raise CLIError(
            """
            Container name passed cannot be a friendly name.
            Please pass a native container name.
            """)
    # Trigger unregister and poll until the operation completes.
    raw_result = client.unregister(vault_name, resource_group_name, fabric_name, container_name, raw=True)
    return cust_help.track_register_operation(cmd.cli_ctx, raw_result, vault_name, resource_group_name, container_name)
def update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy):
    """Point an existing protected item at a different backup policy of the same type."""
    if item.properties.backup_management_type != policy.properties.backup_management_type:
        raise CLIError(
            """
            The policy type should match with the workload being protected.
            Use the relevant get-default policy command and use it to update the policy for the workload.
            """)
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    # The item type is the first ';'-separated token of the protected item URI.
    backup_item_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(backup_item_type) or cust_help.is_hana(backup_item_type)):
        raise InvalidArgumentValueError("Item must be either of type SQLDataBase or SAPHanaDatabase.")
    updated_properties = _get_protected_item_instance(backup_item_type)
    updated_properties.policy_id = policy.id
    request = ProtectedItemResource(properties=updated_properties)
    # Update policy
    raw_result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                         container_uri, item_uri, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
def resume_protection(cmd, client, resource_group_name, vault_name, item, policy):
    """Resume protection for a stopped item; equivalent to re-assigning its policy."""
    return update_policy_for_item(cmd, client, resource_group_name, vault_name, item, policy)
def initialize_protectable_items(client, resource_group_name, vault_name, container_name, workload_type):
    """Trigger an inquiry so the service discovers protectable items inside the container."""
    resolved_workload = _check_map(workload_type, workload_type_map)
    query_filter = cust_help.get_filter_string({
        'backupManagementType': 'AzureWorkload',
        'workloadType': resolved_workload})
    return client.inquire(vault_name, resource_group_name, fabric_name, container_name, query_filter)
def create_policy(client, resource_group_name, vault_name, policy_name, policy, workload_type):
    """Create an AzureWorkload backup policy from a JSON definition."""
    resolved_workload = _check_map(workload_type, workload_type_map)
    policy_object = cust_help.get_policy_from_json(client, policy)
    # Force the management/workload type regardless of what the JSON contained.
    policy_object.properties.backup_management_type = "AzureWorkload"
    policy_object.properties.workload_type = resolved_workload
    policy_object.name = policy_name
    return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
def set_policy(client, resource_group_name, vault_name, policy, policy_name, fix_for_inconsistent_items):
    """Update an existing policy, either from a JSON definition or by marking it consistent."""
    if policy_name is None:
        raise CLIError(
            """
            Policy name is required for set policy.
            """)
    if policy is not None:
        policy_object = cust_help.get_policy_from_json(client, policy)
    elif fix_for_inconsistent_items:
        # No JSON given: re-read the stored policy and ask the service to retry
        # items that failed to pick up the previous policy update.
        policy_object = common.show_policy(client, resource_group_name, vault_name, policy_name)
        policy_object.properties.make_policy_consistent = True
    else:
        raise CLIError(
            """
            Please provide policy object.
            """)
    return client.create_or_update(vault_name, resource_group_name, policy_name, policy_object)
def show_protectable_item(items, name, server_name, protectable_item_type):
    """Filter protectable items down to those matching name, server and item type."""
    resolved_type = _check_map(protectable_item_type, protectable_item_type_map)
    # Match on the native name when one was passed, otherwise on the friendly name.
    if cust_help.is_native_name(name):
        name_matches = [item for item in items if item.name.lower() == name.lower()]
    else:
        name_matches = [item for item in items
                        if item.properties.friendly_name.lower() == name.lower()]
    # Server Name filter
    server_matches = [item for item in name_matches
                      if item.properties.server_name.lower() == server_name.lower()]
    # Protectable Item Type filter
    type_matches = [item for item in server_matches
                    if item.properties.protectable_item_type.lower() == resolved_type.lower()]
    return cust_help.get_none_one_or_many(type_matches)
def show_protectable_instance(items, server_name, protectable_item_type):
    """Filter protectable items down to instances on the given server of the given type."""
    if server_name is None:
        raise RequiredArgumentMissingError("""
        Server name missing. Please provide a valid server name using --target-server-name.
        """)
    if protectable_item_type is None:
        az_error = RequiredArgumentMissingError("""
        Protectable item type missing. Please provide a valid protectable item type name using --target-server-type.
        """)
        recommendation_text = "{} are the allowed values.".format(str(list(protectable_item_type_map.keys())))
        az_error.set_recommendation(recommendation_text)
        raise az_error
    resolved_type = _check_map(protectable_item_type, protectable_item_type_map)
    # Protectable Item Type filter (items with no type are dropped).
    type_matches = [item for item in items
                    if item.properties.protectable_item_type is not None and
                    item.properties.protectable_item_type.lower() == resolved_type.lower()]
    # Server Name filter
    server_matches = [item for item in type_matches
                      if item.properties.server_name.lower() == server_name.lower()]
    return cust_help.get_none_one_or_many(server_matches)
def list_protectable_items(client, resource_group_name, vault_name, workload_type,
                           backup_management_type="AzureWorkload", container_uri=None, protectable_item_type=None):
    """List protectable items in the vault, optionally filtered by item type and container."""
    resolved_workload = _check_map(workload_type, workload_type_map)
    resolved_type = None
    if protectable_item_type is not None:
        resolved_type = _check_map(protectable_item_type, protectable_item_type_map)
    query_filter = cust_help.get_filter_string({
        'backupManagementType': backup_management_type,
        'workloadType': resolved_workload})
    # Items list
    paged_items = cust_help.get_list_from_paged_response(
        client.list(vault_name, resource_group_name, query_filter))
    if resolved_type is not None:
        # Protectable Item Type filter
        paged_items = [item for item in paged_items
                       if item.properties.protectable_item_type.lower() == resolved_type.lower()]
    if container_uri:
        paged_items = [item for item in paged_items
                       if cust_help.get_protection_container_uri_from_id(item.id).lower() == container_uri.lower()]
    return paged_items
def list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item, start_date=None, end_date=None,
                            extended_info=None, use_secondary_region=None):
    """List recovery points for a protected workload item, optionally from the secondary region."""
    # Get container and item URIs
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    query_end_date, query_start_date = cust_help.get_query_dates(end_date, start_date)
    if query_end_date and query_start_date:
        cust_help.is_range_valid(query_start_date, query_end_date)
    filter_params = {
        'startDate': query_start_date,
        'endDate': query_end_date}
    # 'show-log-chain' (third token of the invoked command name) and
    # --extended-info both require the Log restore-point query type.
    if cmd.name.split()[2] == 'show-log-chain' or extended_info is not None:
        filter_params = {
            'restorePointQueryType': 'Log',
            'startDate': query_start_date,
            'endDate': query_end_date,
            'extendedInfo': extended_info}
    query_filter = cust_help.get_filter_string(filter_params)
    if use_secondary_region:
        client = recovery_points_crr_cf(cmd.cli_ctx)
    # Get recovery points
    recovery_points = client.list(vault_name, resource_group_name, fabric_name, container_uri, item_uri, query_filter)
    return cust_help.get_list_from_paged_response(recovery_points)
def enable_protection_for_azure_wl(cmd, client, resource_group_name, vault_name, policy_object, protectable_item):
    """Enable backup protection for a protectable SQL / SAP HANA item using the given policy."""
    protectable_item_object = protectable_item
    protectable_item_type = protectable_item_object.properties.protectable_item_type
    if protectable_item_type.lower() not in ("sqldatabase", "sqlinstance", "saphanadatabase", "saphanasystem"):
        raise CLIError(
            """
            Protectable Item must be either of type SQLDataBase, HANADatabase, HANAInstance or SQLInstance.
            """)
    item_name = protectable_item_object.name
    # The container name is the 13th segment of the protectable item's ARM ID.
    container_name = protectable_item_object.id.split('/')[12]
    cust_help.validate_policy(policy_object)
    new_item_properties = _get_protected_item_instance(protectable_item_type)
    new_item_properties.backup_management_type = 'AzureWorkload'
    new_item_properties.policy_id = policy_object.id
    new_item_properties.workload_type = protectable_item_type
    # NOTE(review): payload is wrapped in ProtectionContainerResource rather than
    # ProtectedItemResource, mirroring the original implementation — confirm intended.
    request = ProtectionContainerResource(properties=new_item_properties)
    # Trigger enable protection and wait for completion
    raw_result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                         container_name, item_name, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
def backup_now(cmd, client, resource_group_name, vault_name, item, retain_until, backup_type,
               enable_compression=False):
    """Trigger an on-demand backup for a protected SQL / SAP HANA item."""
    if backup_type is None:
        raise RequiredArgumentMissingError("Backup type missing. Please provide a valid backup type using "
                                           "--backup-type argument.")
    message = "For SAPHANA and SQL workload, retain-until parameter value will be overridden by the underlying policy"
    # A user-supplied retain-until only survives for CopyOnlyFull backups; in
    # every other case the service policy wins and we default to now + 30 days.
    if retain_until is None or backup_type != 'CopyOnlyFull':
        if retain_until is not None:
            logger.warning(message)
        retain_until = datetime.now(timezone.utc) + timedelta(days=30)
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    backup_item_type = item_uri.split(';')[0]
    if enable_compression and not cust_help.is_sql(backup_item_type):
        raise CLIError(
            """
            Enable compression is not applicable for SAPHanaDatabase item type.
            """)
    if cust_help.is_hana(backup_item_type) and backup_type in ('Log', 'CopyOnlyFull', 'Incremental'):
        raise CLIError(
            """
            Backup type cannot be Log, CopyOnlyFull, Incremental for SAPHanaDatabase Adhoc backup.
            """)
    request_properties = AzureWorkloadBackupRequest(backup_type=backup_type, enable_compression=enable_compression,
                                                    recovery_point_expiry_time_in_utc=retain_until)
    request = BackupRequestResource(properties=request_properties)
    # Trigger backup and wait for completion
    raw_result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
                                item_uri, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
def disable_protection(cmd, client, resource_group_name, vault_name, item, delete_backup_data):
    """Stop protection for an item, optionally deleting all of its backup data."""
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    backup_item_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(backup_item_type) or cust_help.is_hana(backup_item_type)):
        raise CLIError(
            """
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)
    if delete_backup_data:
        # Deleting backup data removes the protected item entirely.
        raw_result = client.delete(vault_name, resource_group_name, fabric_name, container_uri, item_uri, raw=True)
        return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
    # Otherwise retain the data and just stop protection (detach the policy).
    stopped_properties = _get_protected_item_instance(backup_item_type)
    stopped_properties.protection_state = 'ProtectionStopped'
    stopped_properties.policy_id = ''
    request = ProtectedItemResource(properties=stopped_properties)
    # Trigger disable protection and wait for completion
    raw_result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                         container_uri, item_uri, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
def undelete_protection(cmd, client, resource_group_name, vault_name, item):
    """Recover a soft-deleted protected item back into the ProtectionStopped state."""
    container_uri = cust_help.get_protection_container_uri_from_id(item.id)
    item_uri = cust_help.get_protected_item_uri_from_id(item.id)
    backup_item_type = item_uri.split(';')[0]
    if not (cust_help.is_sql(backup_item_type) or cust_help.is_hana(backup_item_type)):
        raise ValidationError(
            """
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)
    rehydrate_properties = _get_protected_item_instance(backup_item_type)
    rehydrate_properties.protection_state = 'ProtectionStopped'
    rehydrate_properties.policy_id = ''
    # is_rehydrate asks the service to undelete rather than re-protect.
    rehydrate_properties.is_rehydrate = True
    request = ProtectedItemResource(properties=rehydrate_properties)
    raw_result = client.create_or_update(vault_name, resource_group_name, fabric_name,
                                         container_uri, item_uri, request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, raw_result, vault_name, resource_group_name)
def auto_enable_for_azure_wl(client, resource_group_name, vault_name, policy_object, protectable_item):
    """Create a protection intent so databases under a SQL instance are auto-protected."""
    item_id = protectable_item.id
    if protectable_item.properties.protectable_item_type.lower() != 'sqlinstance':
        raise CLIError(
            """
            Protectable Item can only be of type SQLInstance.
            """)
    intent_properties = AzureRecoveryServiceVaultProtectionIntent(backup_management_type='AzureWorkload',
                                                                  policy_id=policy_object.id, item_id=item_id)
    request = ProtectionIntentResource(properties=intent_properties)
    intent_object_name = str(uuid4())
    # Best-effort call: report success/failure instead of raising.
    try:
        client.create_or_update(vault_name, resource_group_name, fabric_name, intent_object_name, request)
        return {'status': True}
    except Exception:
        return {'status': False}
def disable_auto_for_azure_wl(client, resource_group_name, vault_name, item_name):
    """Remove the auto-protection intent for a SQL instance."""
    if not cust_help.is_native_name(item_name):
        raise CLIError(
            """
            Protectable Item name must be native.
            """)
    item_type_prefix = item_name.split(';')[0]
    if item_type_prefix.lower() != 'sqlinstance':
        raise CLIError(
            """
            Protectable Item can only be of type SQLInstance.
            """)
    # NOTE(review): a freshly generated UUID is passed as the intent object name to
    # delete, so it cannot equal the name used at creation time — confirm the
    # service resolves the intent by some other key.
    intent_object_name = str(uuid4())
    # Best-effort call: report success/failure instead of raising.
    try:
        client.delete(vault_name, resource_group_name, fabric_name, intent_object_name)
        return {'status': True}
    except Exception:
        return {'status': False}
def list_workload_items(cmd, vault_name, resource_group_name, container_name,
                        container_type="AzureWorkload", workload_type="SQLInstance"):
    """List workload items (e.g. SQL instances) inside a registered container."""
    query_filter = cust_help.get_filter_string({
        'backupManagementType': container_type,
        'workloadItemType': workload_type})
    items_client = backup_workload_items_cf(cmd.cli_ctx)
    paged_items = items_client.list(vault_name, resource_group_name,
                                    fabric_name, container_name, query_filter)
    return cust_help.get_list_from_paged_response(paged_items)
def restore_azure_wl(cmd, client, resource_group_name, vault_name, recovery_config):
    """Trigger a workload (SQL / SAP HANA) restore from a recovery-config object.

    :param recovery_config: path to, or inline JSON of, the dict produced by
        ``show_recovery_config``; every restore parameter is read from it.
    :return: the tracked backup job for the restore operation.
    """
    recovery_config_object = cust_help.get_or_read_json(recovery_config)
    restore_mode = recovery_config_object['restore_mode']
    container_uri = recovery_config_object['container_uri']
    item_uri = recovery_config_object['item_uri']
    recovery_point_id = recovery_config_object['recovery_point_id']
    log_point_in_time = recovery_config_object['log_point_in_time']
    item_type = recovery_config_object['item_type']
    source_resource_id = recovery_config_object['source_resource_id']
    database_name = recovery_config_object['database_name']
    container_id = recovery_config_object['container_id']
    alternate_directory_paths = recovery_config_object['alternate_directory_paths']
    recovery_mode = recovery_config_object['recovery_mode']
    filepath = recovery_config_object['filepath']
    # Construct trigger restore request object
    # The concrete request class depends on workload type and on whether this
    # is a point-in-time (log) restore.
    trigger_restore_properties = _get_restore_request_instance(item_type, log_point_in_time)
    trigger_restore_properties.recovery_type = restore_mode
    if restore_mode == 'AlternateLocation':
        if recovery_mode != "FileRecovery":
            # Alternate-location restore to another server/instance.
            setattr(trigger_restore_properties, 'source_resource_id', source_resource_id)
            setattr(trigger_restore_properties, 'target_info', TargetRestoreInfo(overwrite_option='Overwrite',
                                                                                 database_name=database_name,
                                                                                 container_id=container_id))
            if 'sql' in item_type.lower():
                # SQL also needs the data/log file path mappings on the target.
                directory_map = []
                for i in alternate_directory_paths:
                    directory_map.append(SQLDataDirectoryMapping(mapping_type=i[0], source_path=i[1],
                                                                 source_logical_name=i[2], target_path=i[3]))
                setattr(trigger_restore_properties, 'alternate_directory_paths', directory_map)
        else:
            # Restore-as-files: dump database files into a directory on the target container.
            target_info = TargetRestoreInfo(overwrite_option='Overwrite', container_id=container_id,
                                            target_directory_for_file_restore=filepath)
            setattr(trigger_restore_properties, 'target_info', target_info)
            trigger_restore_properties.recovery_mode = recovery_mode
    if log_point_in_time is not None:
        # Log restore: pin the exact point in time (validated/parsed by datetime_type).
        setattr(trigger_restore_properties, 'point_in_time', datetime_type(log_point_in_time))
    if 'sql' in item_type.lower():
        setattr(trigger_restore_properties, 'should_use_alternate_target_location', True)
        setattr(trigger_restore_properties, 'is_non_recoverable', False)
    trigger_restore_request = RestoreRequestResource(properties=trigger_restore_properties)
    # Trigger restore and wait for completion
    result = client.trigger(vault_name, resource_group_name, fabric_name, container_uri,
                            item_uri, recovery_point_id, trigger_restore_request, raw=True)
    return cust_help.track_backup_job(cmd.cli_ctx, result, vault_name, resource_group_name)
def show_recovery_config(cmd, client, resource_group_name, vault_name, restore_mode, container_name, item_name,
                         rp_name, target_item, target_item_name, log_point_in_time, from_full_rp_name,
                         filepath, target_container):
    """Build and return the recovery-config dict consumed by ``restore_azure_wl``.

    Validates the requested restore mode against the protected item and, for
    alternate-location SQL restores, computes target data/log path mappings.
    """
    if log_point_in_time is not None:
        datetime_type(log_point_in_time)  # validate format only; raises on bad input
    if restore_mode == 'AlternateWorkloadRestore':
        if target_item is None:
            raise CLIError(
                """
                Target Item must be provided.
                """)
        protectable_item_type = target_item.properties.protectable_item_type
        if protectable_item_type.lower() not in ["sqlinstance", "saphanasystem"]:
            raise CLIError(
                """
                Target Item must be either of type HANAInstance or SQLInstance.
                """)
    if restore_mode == 'RestoreAsFiles' and target_container is None:
        raise CLIError("Target Container must be provided.")
    if rp_name is None and log_point_in_time is None:
        raise CLIError(
            """
            Log point in time or recovery point name must be provided.
            """)
    item = common.show_item(cmd, backup_protected_items_cf(cmd.cli_ctx), resource_group_name, vault_name,
                            container_name, item_name, "AzureWorkload")
    cust_help.validate_item(item)
    item_type = item.properties.workload_type
    item_name = item.name
    if not cust_help.is_sql(item_type) and not cust_help.is_hana(item_type):
        raise CLIError(
            """
            Item must be either of type SQLDataBase or SAPHanaDatabase.
            """)
    # Mapping of restore mode
    restore_mode_map = {'OriginalWorkloadRestore': 'OriginalLocation',
                        'AlternateWorkloadRestore': 'AlternateLocation',
                        'RestoreAsFiles': 'AlternateLocation'}
    # Restore-as-files may name a full RP to restore from instead of an RP name.
    if rp_name is None and restore_mode == "RestoreAsFiles" and from_full_rp_name is not None:
        rp_name = from_full_rp_name
    rp_name = rp_name if rp_name is not None else 'DefaultRangeRecoveryPoint'
    if rp_name == 'DefaultRangeRecoveryPoint':
        # Log restore: look up the synthetic default-range recovery point.
        recovery_points = list_wl_recovery_points(cmd, client, resource_group_name, vault_name, item,
                                                  None, None, True)
        recovery_points = [rp for rp in recovery_points if rp.name == rp_name]
        if recovery_points == []:
            raise CLIError(
                """
                Invalid input.
                """)
        recovery_point = recovery_points[0]
    else:
        recovery_point = common.show_recovery_point(cmd, client, resource_group_name, vault_name, container_name,
                                                    item_name, rp_name, item_type,
                                                    backup_management_type="AzureWorkload")
    alternate_directory_paths = []
    if 'sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore':
        # Map each source data/log path from the recovery point onto the
        # matching directory layout of the target SQL instance.
        items = list_workload_items(cmd, vault_name, resource_group_name, container_name)
        for titem in items:
            if titem.properties.friendly_name == target_item.properties.friendly_name:
                if titem.properties.server_name == target_item.properties.server_name:
                    for path in recovery_point.properties.extended_info.data_directory_paths:
                        target_path = cust_help.get_target_path(path.type, path.path, path.logical_name,
                                                                titem.properties.data_directory_paths)
                        alternate_directory_paths.append((path.type, path.path, path.logical_name, target_path))
    db_name = None
    if restore_mode == 'AlternateWorkloadRestore':
        friendly_name = target_item.properties.friendly_name
        db_name = friendly_name + '/' + target_item_name
    container_id = None
    if restore_mode == 'AlternateWorkloadRestore':
        # Container ID is the target item's ARM ID minus its last two segments.
        container_id = '/'.join(target_item.id.split('/')[:-2])
    if not ('sql' in item_type.lower() and restore_mode == 'AlternateWorkloadRestore'):
        alternate_directory_paths = None
    recovery_mode = None
    if restore_mode == 'RestoreAsFiles':
        recovery_mode = 'FileRecovery'
        container_id = target_container.id
    return {
        'restore_mode': restore_mode_map[restore_mode],
        'container_uri': item.properties.container_name,
        'item_uri': item_name,
        'recovery_point_id': recovery_point.name,
        'log_point_in_time': log_point_in_time,
        'item_type': 'SQL' if 'sql' in item_type.lower() else 'SAPHana',
        'source_resource_id': item.properties.source_resource_id,
        'database_name': db_name,
        'container_id': container_id,
        'recovery_mode': recovery_mode,
        'filepath': filepath,
        'alternate_directory_paths': alternate_directory_paths}
def _get_restore_request_instance(item_type, log_point_in_time):
if item_type.lower() == "saphana":
if log_point_in_time is not None:
return AzureWorkloadSAPHanaPointInTimeRestoreRequest()
return AzureWorkloadSAPHanaRestoreRequest()
if item_type.lower() == "sql":
if log_point_in_time is not None:
return AzureWorkloadSQLPointInTimeRestoreRequest()
return AzureWorkloadSQLRestoreRequest()
return None
def _get_protected_item_instance(item_type):
    """Return an empty protected-item model for the given workload type.

    SAPHanaDatabase maps to the HANA model; everything else to the SQL model.
    """
    is_hana = item_type.lower() == "saphanadatabase"
    model_cls = AzureVmWorkloadSAPHanaDatabaseProtectedItem if is_hana \
        else AzureVmWorkloadSQLDatabaseProtectedItem
    return model_cls()
def _check_map(item_type, item_type_map):
if item_type is None:
if item_type_map == workload_type_map:
az_error = RequiredArgumentMissingError("""
Workload type missing. Please enter a valid workload type using --workload-type.
""")
recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
az_error.set_recommendation(recommendation_text)
raise az_error
if item_type_map == protectable_item_type_map:
az_error = RequiredArgumentMissingError("""
Protectable item type missing. Please enter a valid protectable item type using --protectable-item-type.
""")
recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
az_error.set_recommendation(recommendation_text)
raise az_error
raise RequiredArgumentMissingError("Item type missing. Enter a valid item type.")
if item_type_map.get(item_type) is not None:
return item_type_map[item_type]
error_text = "{} is an invalid argument.".format(item_type)
recommendation_text = "{} are the allowed values.".format(str(list(item_type_map.keys())))
az_error = InvalidArgumentValueError(error_text)
az_error.set_recommendation(recommendation_text)
raise az_error
| 45.936726 | 119 | 0.695802 |
ace234d7a633c0ba9552121e19b0606c6a3b6aac
| 5,873 |
py
|
Python
|
src/jobwatcher/jobwatcher.py
|
agobeaux/aws-parallelcluster-node
|
69c7cfdc00bcec923e313b7094ccc93cf73043ba
|
[
"Apache-2.0"
] | null | null | null |
src/jobwatcher/jobwatcher.py
|
agobeaux/aws-parallelcluster-node
|
69c7cfdc00bcec923e313b7094ccc93cf73043ba
|
[
"Apache-2.0"
] | null | null | null |
src/jobwatcher/jobwatcher.py
|
agobeaux/aws-parallelcluster-node
|
69c7cfdc00bcec923e313b7094ccc93cf73043ba
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
from datetime import datetime
import boto3
from botocore.config import Config
from configparser import ConfigParser
from retrying import retry
from common.time_utils import seconds
from common.utils import (
get_asg_name,
get_asg_settings,
get_compute_instance_type,
get_instance_properties,
load_module,
sleep_remaining_loop_time,
)
# Seconds per iteration of the main scaling loop.
LOOP_TIME = 60
# Seconds between refreshes of the compute instance type/properties.
UPDATE_INSTANCE_PROPERTIES_INTERVAL = 180
log = logging.getLogger(__name__)
# Immutable bundle of the daemon's configuration values read from /etc/jobwatcher.cfg.
JobwatcherConfig = collections.namedtuple(
    "JobwatcherConfig", ["region", "scheduler", "stack_name", "pcluster_dir", "proxy_config"]
)
def _get_config():
    """
    Get configuration from config file.

    Reads /etc/jobwatcher.cfg and optionally raises the root logger level.

    :return: a JobwatcherConfig namedtuple (region, scheduler, stack_name,
        pcluster_dir, botocore proxy Config)
    """
    config_file = "/etc/jobwatcher.cfg"
    log.info("Reading %s", config_file)
    config = ConfigParser()
    config.read(config_file)
    if config.has_option("jobwatcher", "loglevel"):
        # Bug fix: logging._levelNames is Python-2-only and was removed in
        # Python 3 (this file already uses the Py3 `configparser` import).
        # getLevelName maps a level name such as "INFO" to its numeric value.
        lvl = logging.getLevelName(config.get("jobwatcher", "loglevel"))
        logging.getLogger().setLevel(lvl)
    region = config.get("jobwatcher", "region")
    scheduler = config.get("jobwatcher", "scheduler")
    stack_name = config.get("jobwatcher", "stack_name")
    # Historical option name: the config key is still "cfncluster_dir".
    pcluster_dir = config.get("jobwatcher", "cfncluster_dir")
    _proxy = config.get("jobwatcher", "proxy")
    proxy_config = Config()
    if _proxy != "NONE":
        # Route all boto3/botocore HTTPS traffic through the proxy.
        proxy_config = Config(proxies={"https": _proxy})
    log.info(
        "Configured parameters: region=%s scheduler=%s stack_name=%s pcluster_dir=%s proxy=%s",
        region,
        scheduler,
        stack_name,
        pcluster_dir,
        _proxy,
    )
    return JobwatcherConfig(region, scheduler, stack_name, pcluster_dir, proxy_config)
def _poll_scheduler_status(config, asg_name, scheduler_module):
    """
    Verify scheduler status and ask the ASG new nodes, if required.

    Runs forever; each iteration is padded to LOOP_TIME seconds by
    sleep_remaining_loop_time. Only scales the ASG up (sets DesiredCapacity),
    never down.

    :param config: JobwatcherConfig object
    :param asg_name: ASG name
    :param scheduler_module: scheduler module
    """
    instance_type = None
    instance_properties = None
    update_instance_properties_timer = 0
    while True:
        start_time = datetime.now()
        # Get instance properties (refreshed at most every
        # UPDATE_INSTANCE_PROPERTIES_INTERVAL seconds).
        if not instance_properties or update_instance_properties_timer >= UPDATE_INSTANCE_PROPERTIES_INTERVAL:
            logging.info("Refreshing compute instance properties")
            update_instance_properties_timer = 0
            new_instance_type = get_compute_instance_type(
                config.region, config.proxy_config, config.stack_name, fallback=instance_type
            )
            # Only re-fetch slot/memory data when the type actually changed.
            if new_instance_type != instance_type:
                instance_type = new_instance_type
                instance_properties = get_instance_properties(config.region, config.proxy_config, instance_type)
        update_instance_properties_timer += LOOP_TIME
        # get current limits
        _, current_desired, max_size = get_asg_settings(config.region, config.proxy_config, asg_name)
        # Get current number of nodes
        running = scheduler_module.get_busy_nodes()
        # Get number of nodes requested
        pending = scheduler_module.get_required_nodes(instance_properties, max_size)
        log.info("%d nodes requested, %d nodes busy or unavailable", pending, running)
        # pending < 0 is the scheduler plugin's error sentinel.
        if pending < 0:
            log.critical("Error detecting number of required nodes. The cluster will not scale up.")
        elif pending == 0:
            log.info("There are no pending jobs or the requirements on pending jobs cannot be satisfied. Noop.")
        else:
            # Check to make sure requested number of instances is within ASG limits
            required = running + pending
            if required <= current_desired:
                log.info("%d nodes required, %d nodes in asg. Noop" % (required, current_desired))
            else:
                if required > max_size:
                    log.info(
                        "The number of required nodes %d is greater than max %d. Requesting max %d."
                        % (required, max_size, max_size)
                    )
                else:
                    log.info(
                        "Setting desired to %d nodes, requesting %d more nodes from asg."
                        % (required, required - current_desired)
                    )
                # Never request beyond the ASG maximum.
                requested = min(required, max_size)
                # update ASG
                asg_client = boto3.client("autoscaling", region_name=config.region, config=config.proxy_config)
                asg_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=requested)
        sleep_remaining_loop_time(LOOP_TIME, start_time)
@retry(wait_fixed=seconds(LOOP_TIME))
def main():
    """Entry point: configure logging, load config and run the poll loop.

    The @retry decorator restarts the whole routine after LOOP_TIME
    seconds whenever an exception escapes.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s")
    log.info("jobwatcher startup")
    try:
        config = _get_config()
        asg_name = get_asg_name(config.stack_name, config.region, config.proxy_config)
        plugin_name = "jobwatcher.plugins." + config.scheduler
        scheduler_module = load_module(plugin_name)
        _poll_scheduler_status(config, asg_name, scheduler_module)
    except Exception as err:
        log.exception("An unexpected error occurred: %s", err)
        raise
# Allow running the watcher directly as a script.
if __name__ == "__main__":
    main()
| 35.593939 | 119 | 0.671037 |
4254f4e486d0a97a59e79ee8432ad7cb2d6675f2
| 1,781 |
py
|
Python
|
tests/wikistats_tests.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
tests/wikistats_tests.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
tests/wikistats_tests.py
|
hperala/kontuwikibot
|
f409e6fb45adf4e553dc326d9fb3c0d29eda6373
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Test cases for the WikiStats dataset."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 6a2dca07831010c753ebdfdd58599f3af3ee10bc $'
#
import sys
from pywikibot.data.wikistats import WikiStats, csv
from tests.aspects import unittest, TestCase
# Python 3 has no ``basestring``; alias it so the isinstance checks in the
# tests below work on both major versions.
if sys.version_info[0] == 3:
    basestring = (str, )
class WikiStatsTestCase(TestCase):

    """Test WikiStats dump."""

    sites = {
        'wikistats': {
            'hostname': 'wikistats.wmflabs.org',
        },
    }

    def test_sort(self):
        """Check sorting and language ranking of the wikistats table."""
        ws = WikiStats()
        data = ws.sorted('wikipedia', 'total')
        top = data[0]
        self.assertIn('prefix', top)
        self.assertIn('total', top)
        self.assertEqual(top['prefix'], 'en')
        self.assertIsInstance(top['total'], basestring)
        self.assertEqual(ws.languages_by_size('wikipedia')[0], 'en')
        self.assertEqual(ws.languages_by_size('wikisource')[0], 'fr')

    def test_csv(self):
        """Check fetching the dump in csv format."""
        if not csv:
            raise unittest.SkipTest('unicodecsv not installed.')
        ws = WikiStats()
        data = ws.get_dict('wikipedia', 'csv')
        self.assertIsInstance(data, dict)
        self.assertIn('en', data)
        self.assertIn('ht', data)
        self.assertGreater(int(data['en']['total']), 4000000)
        # Bug fix: removed dead trailing statement ``data = ws.get_dict``
        # which rebound ``data`` to the method object without calling it
        # (leftover debugging code with no effect on the test).

    def test_xml(self):
        """Check fetching the dump in xml format."""
        ws = WikiStats()
        data = ws.get_dict('wikisource', 'xml')
        self.assertIsInstance(data, dict)
        self.assertIn('en', data)
        self.assertIn('id', data)
        self.assertGreater(int(data['fr']['total']), 1600000)
# Run the tests when the module is executed directly; swallow the
# SystemExit that unittest.main() raises on completion.
if __name__ == '__main__':
    try:
        unittest.main()
    except SystemExit:
        pass
| 25.811594 | 69 | 0.608647 |
485ee681209f55afc587bfa30d7aaaa603755a5c
| 5,147 |
py
|
Python
|
homeassistant/components/remote/__init__.py
|
loraxx753/skynet
|
86a1b0a6c6a3f81bc92d4f61de6a9a6b9f964543
|
[
"Apache-2.0"
] | 2 |
2017-02-25T00:27:06.000Z
|
2017-02-25T03:09:30.000Z
|
homeassistant/components/remote/__init__.py
|
bytebility/home-assistant
|
6015274ee2486f797fd6ee8f5f2074a601953e03
|
[
"MIT"
] | 1 |
2017-03-10T22:17:06.000Z
|
2017-03-10T22:17:06.000Z
|
homeassistant/components/remote/__init__.py
|
bytebility/home-assistant
|
6015274ee2486f797fd6ee8f5f2074a601953e03
|
[
"MIT"
] | 2 |
2018-10-22T17:05:47.000Z
|
2021-09-22T10:52:31.000Z
|
"""
Component to interface with universal remote control devices.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/remote/
"""
import asyncio
from datetime import timedelta
import functools as ft
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import ToggleEntity
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
STATE_ON, SERVICE_TURN_ON, SERVICE_TURN_OFF, ATTR_ENTITY_ID)
from homeassistant.components import group
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
_LOGGER = logging.getLogger(__name__)
# Attribute names carried in remote service call payloads.
ATTR_ACTIVITY = 'activity'
ATTR_COMMAND = 'command'
ATTR_DEVICE = 'device'
DOMAIN = 'remote'
# Entity id of the auto-generated group containing every remote.
ENTITY_ID_ALL_REMOTES = group.ENTITY_ID_FORMAT.format('all_remotes')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GROUP_NAME_ALL_REMOTES = 'all remotes'
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SCAN_INTERVAL = timedelta(seconds=30)
SERVICE_SEND_COMMAND = 'send_command'
SERVICE_SYNC = 'sync'
# Base schema: every remote service targets one or more entity ids.
REMOTE_SERVICE_SCHEMA = vol.Schema({
    vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
})
# turn_on may optionally select an activity (e.g. Harmony hub activities).
REMOTE_SERVICE_TURN_ON_SCHEMA = REMOTE_SERVICE_SCHEMA.extend({
    vol.Optional(ATTR_ACTIVITY): cv.string
})
# send_command requires both a target device and the command to send.
REMOTE_SERVICE_SEND_COMMAND_SCHEMA = REMOTE_SERVICE_SCHEMA.extend({
    vol.Required(ATTR_DEVICE): cv.string,
    vol.Required(ATTR_COMMAND): cv.string,
})
def is_on(hass, entity_id=None):
    """Return True when the given remote (or the all-remotes group) is on."""
    target = entity_id or ENTITY_ID_ALL_REMOTES
    return hass.states.is_state(target, STATE_ON)
def turn_on(hass, activity=None, entity_id=None):
    """Turn all or specified remote on, optionally selecting an activity."""
    service_data = {ATTR_ACTIVITY: activity}
    if entity_id:
        service_data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_TURN_ON, service_data)
def turn_off(hass, entity_id=None):
    """Turn all or specified remote off."""
    if entity_id:
        service_data = {ATTR_ENTITY_ID: entity_id}
    else:
        service_data = None
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, service_data)
def send_command(hass, device, command, entity_id=None):
    """Tell one remote (or all remotes) to send *command* to *device*."""
    payload = {ATTR_DEVICE: str(device), ATTR_COMMAND: command}
    if entity_id:
        payload[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SEND_COMMAND, payload)
@asyncio.coroutine
def async_setup(hass, config):
    """Track states and offer events for remotes.

    Sets up the entity component, then registers the turn_on / turn_off /
    send_command services with their schemas and descriptions.
    """
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_REMOTES)
    yield from component.async_setup(config)
    @asyncio.coroutine
    def async_handle_remote_service(service):
        """Handle calls to the remote services."""
        # Resolve the entity_id(s) in the call to RemoteDevice objects.
        target_remotes = component.async_extract_from_service(service)
        activity_id = service.data.get(ATTR_ACTIVITY)
        device = service.data.get(ATTR_DEVICE)
        command = service.data.get(ATTR_COMMAND)
        for remote in target_remotes:
            if service.service == SERVICE_TURN_ON:
                yield from remote.async_turn_on(activity=activity_id)
            elif service.service == SERVICE_SEND_COMMAND:
                yield from remote.async_send_command(
                    device=device, command=command)
            else:
                # Any other registered service name falls through to turn_off.
                yield from remote.async_turn_off()
        # Poll affected remotes so their new state is reflected immediately.
        update_tasks = []
        for remote in target_remotes:
            if not remote.should_poll:
                continue
            update_coro = hass.loop.create_task(
                remote.async_update_ha_state(True))
            # Coroutine-native platforms are awaited in parallel below;
            # executor-backed ones are awaited one at a time.
            if hasattr(remote, 'async_update'):
                update_tasks.append(update_coro)
            else:
                yield from update_coro
        if update_tasks:
            yield from asyncio.wait(update_tasks, loop=hass.loop)
    # Service descriptions involve blocking file I/O, so load in the executor.
    descriptions = yield from hass.loop.run_in_executor(
        None, load_yaml_config_file, os.path.join(
            os.path.dirname(__file__), 'services.yaml'))
    hass.services.async_register(
        DOMAIN, SERVICE_TURN_OFF, async_handle_remote_service,
        descriptions.get(SERVICE_TURN_OFF),
        schema=REMOTE_SERVICE_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_TURN_ON, async_handle_remote_service,
        descriptions.get(SERVICE_TURN_ON),
        schema=REMOTE_SERVICE_TURN_ON_SCHEMA)
    hass.services.async_register(
        DOMAIN, SERVICE_SEND_COMMAND, async_handle_remote_service,
        descriptions.get(SERVICE_SEND_COMMAND),
        schema=REMOTE_SERVICE_SEND_COMMAND_SCHEMA)
    return True
class RemoteDevice(ToggleEntity):
    """Base representation of a remote control device."""

    def send_command(self, **kwargs):
        """Send a command to a device; concrete platforms must override."""
        raise NotImplementedError()

    def async_send_command(self, **kwargs):
        """Send a command to a device.

        This method must be run in the event loop and returns a coroutine.
        """
        job = ft.partial(self.send_command, **kwargs)
        return self.hass.loop.run_in_executor(None, job)
| 32.575949 | 75 | 0.714397 |
0c3e7e7fdaddf1dcf00e9c0b14910988d5bb3b7e
| 3,056 |
py
|
Python
|
tests/ext/daemon_tests.py
|
ponsonio-aurea/cement
|
f55b465c6ef549f0136ed52e8f300f0caecf7bbd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ext/daemon_tests.py
|
ponsonio-aurea/cement
|
f55b465c6ef549f0136ed52e8f300f0caecf7bbd
|
[
"BSD-3-Clause"
] | null | null | null |
tests/ext/daemon_tests.py
|
ponsonio-aurea/cement
|
f55b465c6ef549f0136ed52e8f300f0caecf7bbd
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests for cement.ext.ext_daemon."""
# NOTE: A large portion of ext_daemon is tested, but not included in
# Coverage report because nose/coverage lose sight of things after the
# sub-process is forked.
import os
#import tempfile
from random import random
from cement.core import handler, backend, log, hook, exc
from cement.utils import shell
from cement.utils import test
from cement.utils.misc import rando
from cement.ext import ext_daemon
# Random 12-character app label so parallel test runs don't collide.
APP = rando()[:12]
class DaemonExtTestCase(test.CementExtTestCase):
    """Tests for ext_daemon's Environment switching and app integration."""
    def setUp(self):
        """Create a fresh app for every test."""
        super(DaemonExtTestCase, self).setUp()
        self.app = self.make_app()
    def test_switch(self):
        """switch() with no pid file should not raise."""
        env = ext_daemon.Environment()
        env.switch()
    def test_switch_with_pid(self):
        """switch() must create the configured pid file."""
        # tmp_file exists from the base class; remove it so switch() can
        # write the pid file itself.
        os.remove(self.tmp_file)
        env = ext_daemon.Environment(pid_file=self.tmp_file)
        env.switch()
        self.ok(os.path.exists(self.tmp_file))
    @test.raises(exc.FrameworkError)
    def test_pid_exists(self):
        """switch() must fail when the pid file already exists."""
        env = ext_daemon.Environment(pid_file=self.tmp_file)
        env.switch()
        try:
            self.ok(os.path.exists(self.tmp_file))
        except exc.FrameworkError as e:
            # Verify it is the expected error, then re-raise for @test.raises.
            self.ok(e.msg.startswith('Process already running'))
            raise
        finally:
            # Restore a pristine environment for later tests.
            env = ext_daemon.Environment()
            env.switch()
    @test.raises(exc.FrameworkError)
    def test_bogus_user(self):
        """A non-existent daemon user must raise FrameworkError."""
        rand = random()
        try:
            env = ext_daemon.Environment(user='cement_test_user%s' % rand)
        except exc.FrameworkError as e:
            self.ok(e.msg.startswith('Daemon user'))
            raise
        finally:
            env = ext_daemon.Environment()
            env.switch()
    @test.raises(exc.FrameworkError)
    def test_bogus_group(self):
        """A non-existent daemon group must raise FrameworkError."""
        rand = random()
        try:
            env = ext_daemon.Environment(group='cement_test_group%s' % rand)
        except exc.FrameworkError as e:
            self.ok(e.msg.startswith('Daemon group'))
            raise
        finally:
            env = ext_daemon.Environment()
            env.switch()
    def test_daemon(self):
        """Daemonize a test app inside a spawned subprocess."""
        os.remove(self.tmp_file)
        from cement.utils import shell
        # Test in a sub-process to avoid Nose hangup
        def target():
            app = self.make_app('test', argv=['--daemon'],
                                extensions=['daemon'])
            app.setup()
            app.config.set('daemon', 'pid_file', self.tmp_file)
            try:
                # FIX ME: Can't daemonize, because nose loses sight of it
                app.daemonize()
                app.run()
            finally:
                app.close()
                ext_daemon.cleanup(app)
        p = shell.spawn_process(target)
        p.join()
        self.eq(p.exitcode, 0)
    def test_daemon_not_passed(self):
        """Without --daemon the app must run normally (pid file unset)."""
        app = self.make_app(APP, extensions=['daemon'])
        app.setup()
        app.config.set('daemon', 'pid_file', None)
        try:
            app.run()
        finally:
            ext_daemon.cleanup(app)
| 28.036697 | 76 | 0.590641 |
e0a47e096bdd84efb8e5cff717b6c9cd935666ff
| 758 |
py
|
Python
|
petycja_norweskie/themes/models.py
|
watchdogpolska/ankieta-norweskie
|
fefbabf8182452c01a3fcf8932707e5a539c9dfd
|
[
"MIT"
] | null | null | null |
petycja_norweskie/themes/models.py
|
watchdogpolska/ankieta-norweskie
|
fefbabf8182452c01a3fcf8932707e5a539c9dfd
|
[
"MIT"
] | 29 |
2017-04-17T01:41:44.000Z
|
2019-07-01T21:03:51.000Z
|
petycja_norweskie/themes/models.py
|
watchdogpolska/ankieta-norweskie
|
fefbabf8182452c01a3fcf8932707e5a539c9dfd
|
[
"MIT"
] | 1 |
2017-09-29T23:17:09.000Z
|
2017-09-29T23:17:09.000Z
|
# coding=utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
class ThemeQuerySet(models.QuerySet):
    """Custom queryset hook for Theme; no extra query helpers defined yet."""
    pass
class Theme(TimeStampedModel):
    """A visual theme with authorship metadata and a unique system prefix."""

    name = models.CharField(max_length=100, verbose_name=_("Name"))
    # Bug fix: the label was a copy-paste duplicate of the name field's
    # verbose_name (_("Name")); it now reads _("Description").
    description = models.TextField(verbose_name=_("Description"))
    authorship = models.CharField(verbose_name=_("Authorship"), max_length=100)
    # Unique machine identifier used to select the theme.
    prefix = models.CharField(max_length=25, unique=True, verbose_name=_("System name of theme prefix"))
    objects = ThemeQuerySet.as_manager()

    class Meta:
        verbose_name = _("Theme")
        verbose_name_plural = _("Themes")
        # Oldest themes first ('created' comes from TimeStampedModel).
        ordering = ['created', ]

    def __str__(self):
        return self.name
| 30.32 | 104 | 0.718997 |
4355791bab46214bcf4e6ce3a4e5a182199193ce
| 26,585 |
py
|
Python
|
venv/lib/python2.7/site-packages/sklearn/tree/tests/test_tree.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python2.7/site-packages/sklearn/tree/tests/test_tree.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python2.7/site-packages/sklearn/tree/tests/test_tree.py
|
bopopescu/fbserver
|
e812dbc4dc0cbf2fda19473015a3d7e253718a19
|
[
"Apache-2.0"
] | 1 |
2020-07-23T19:26:19.000Z
|
2020-07-23T19:26:19.000Z
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
import numpy as np
from functools import partial
from itertools import product
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Split criteria exercised for classifiers and regressors respectively.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
# Estimator factories keyed by a human-readable name used in failure messages.
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
# toy sample: two small 2-D classes; T are held-out query points.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification_toy():
    """Check classification on a toy dataset."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result,
                           "Failed with {0}".format(name))
        # Restricting max_features must not change the toy prediction.
        clf = Tree(max_features=1, random_state=1)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result,
                           "Failed with {0}".format(name))
def test_weighted_classification_toy():
    """Check classification on a weighted toy dataset."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        # Uniform weights are equivalent to no weights.
        clf.fit(X, y, sample_weight=np.ones(len(X)))
        assert_array_equal(clf.predict(T), true_result,
                           "Failed with {0}".format(name))
        # Uniform rescaling of the weights must not change the fit either.
        clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
        assert_array_equal(clf.predict(T), true_result,
                           "Failed with {0}".format(name))
def test_regression_toy():
    """Check regression on a toy dataset, with and without max_features."""
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        # Bug fix: the second assertion previously re-checked the first
        # estimator (``reg``) while the max_features=1 model (misnamed
        # ``clf``) was fitted but never exercised.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    """Check on a XOR problem"""
    y = np.zeros((10, 10))
    y[:5, :5] = 1
    y[5:, 5:] = 1
    gridx, gridy = np.indices(y.shape)
    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
    y = y.ravel()
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        # A tree can memorize XOR perfectly.
        assert_equal(clf.score(X, y), 1.0,
                     "Failed with {0}".format(name))
        clf = Tree(random_state=0, max_features=1)
        clf.fit(X, y)
        assert_equal(clf.score(X, y), 1.0,
                     "Failed with {0}".format(name))
def test_iris():
    """Check consistency on dataset iris."""
    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
        clf = Tree(criterion=criterion, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        assert_greater(score, 0.9,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
        # A weaker bound applies when feature subsampling is used.
        clf = Tree(criterion=criterion, max_features=2, random_state=0)
        clf.fit(iris.data, iris.target)
        score = accuracy_score(clf.predict(iris.data), iris.target)
        assert_greater(score, 0.5,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
def test_boston():
    """Check consistency on dataset boston house prices."""
    for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
        reg = Tree(criterion=criterion, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 1,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))
        # using fewer features reduces the learning ability of this tree,
        # but reduces training time.
        reg = Tree(criterion=criterion, max_features=6, random_state=0)
        reg.fit(boston.data, boston.target)
        score = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_less(score, 2,
                    "Failed with {0}, criterion = {1} and score = {2}"
                    "".format(name, criterion, score))
def test_probability():
    """Predict probabilities using DecisionTreeClassifier."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(max_depth=1, max_features=1, random_state=42)
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        # Per-row probabilities must sum to one.
        assert_array_almost_equal(np.sum(prob_predict, 1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg="Failed with {0}".format(name))
        # argmax of the probabilities must agree with predict().
        assert_array_equal(np.argmax(prob_predict, 1),
                           clf.predict(iris.data),
                           err_msg="Failed with {0}".format(name))
        # predict_log_proba must be the log of predict_proba.
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg="Failed with {0}".format(name))
def test_arrayrepr():
    """Check the array representation."""
    # Check resize
    # Fitting an unbounded tree on 10k points forces internal node-array
    # resizing; the test passes if no error is raised.
    X = np.arange(10000)[:, np.newaxis]
    y = np.arange(10000)
    for name, Tree in REG_TREES.items():
        reg = Tree(max_depth=None, random_state=0)
        reg.fit(X, y)
def test_pure_set():
    """Check when y is pure (a single constant target value)."""
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # Bug fix: this assertion previously checked ``clf`` (leaked from
        # the classifier loop above) instead of the regressor under test.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    """Check numerical stability."""
    # Values chosen to provoke floating point round-off in the impurity
    # computation; fitting must not raise under errstate(all="raise").
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            # Sign flips of X and/or y must also fit without FP errors.
            reg.fit(X, y)
            reg.fit(X, -y)
            reg.fit(-X, y)
            reg.fit(-X, -y)
def test_importances():
    """Check variable importances."""
    # 3 informative features out of 10; the tree should rank exactly those.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
        assert_equal(n_important, 3, "Failed with {0}".format(name))
        # Feature selection via transform must strictly shrink the width.
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
        assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
    # Check on iris that importances are the same for all builders
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)
    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
    """Check if variable importance before fit raises ValueError. """
    clf = DecisionTreeClassifier()
    # Accessing the property on an unfitted estimator must raise.
    clf.feature_importances_
def test_importances_gini_equal_mse():
    """Check that gini is equivalent to mse for binary output variable"""
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)
    # Both criteria must build structurally identical trees.
    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    assert_array_equal(clf.tree_.feature, reg.tree_.feature)
    assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
    assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
    assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
    """Check max_features."""
    # "auto" means all features for regressors ...
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])
    # ... but sqrt(n_features) for classifiers (iris: sqrt(4) == 2).
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)
    for name, TreeEstimator in ALL_TREES.items():
        # String, int, float and None forms of max_features.
        est = TreeEstimator(max_features="sqrt")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.sqrt(iris.data.shape[1])))
        est = TreeEstimator(max_features="log2")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.log2(iris.data.shape[1])))
        est = TreeEstimator(max_features=1)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=3)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 3)
        # Fractions below 1/n_features round up to one feature.
        est = TreeEstimator(max_features=0.01)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=0.5)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(0.5 * iris.data.shape[1]))
        est = TreeEstimator(max_features=1.0)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        est = TreeEstimator(max_features=None)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # use values of max_features that are invalid
        est = TreeEstimator(max_features=10)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=-1)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=0.0)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=1.5)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features="foobar")
        assert_raises(ValueError, est.fit, X, y)
def test_error():
    """Test that it gives proper exception on deficient input."""
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(Exception, est.predict_proba, X)
        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)
    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
        # Wrong dimensions
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)
        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)
        # predict before fitting
        est = TreeEstimator()
        assert_raises(Exception, est.predict, T)
        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])
        # wrong sample shape
        Xt = np.array(X).T
        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)
        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
    """Test if leaves contain more than leaf_count training examples"""
    X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    y = iris.target
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(min_samples_leaf=5, random_state=0)
        est.fit(X, y)
        # apply() maps each sample to its leaf; count samples per node.
        out = est.tree_.apply(X)
        node_counts = np.bincount(out)
        leaf_count = node_counts[node_counts != 0]  # drop inner nodes
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def test_pickle():
    """Check that tree estimator are pickable """
    # A pickle round-trip must preserve type and score exactly.
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        serialized_object = pickle.dumps(clf)
        clf2 = pickle.loads(serialized_object)
        assert_equal(type(clf2), clf.__class__)
        score2 = clf2.score(iris.data, iris.target)
        assert_equal(score, score2, "Failed to generate same score "
                                    "after pickling (classification) "
                                    "with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(boston.data, boston.target)
        score = reg.score(boston.data, boston.target)
        serialized_object = pickle.dumps(reg)
        reg2 = pickle.loads(serialized_object)
        assert_equal(type(reg2), reg.__class__)
        score2 = reg2.score(boston.data, boston.target)
        assert_equal(score, score2, "Failed to generate same score "
                                    "after pickling (regression) "
                                    "with {0}".format(name))
def test_multioutput():
    """Check estimators on multi-output problems."""
    # 2-D targets: each sample has two output values.
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]
    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    # toy classification problem
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        y_hat = clf.fit(X, y).predict(T)
        assert_array_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
        # predict_proba returns one array per output, shaped
        # (n_samples, n_classes_of_that_output).
        proba = clf.predict_proba(T)
        assert_equal(len(proba), 2)
        assert_equal(proba[0].shape, (4, 2))
        assert_equal(proba[1].shape, (4, 4))
        log_proba = clf.predict_log_proba(T)
        assert_equal(len(log_proba), 2)
        assert_equal(log_proba[0].shape, (4, 2))
        assert_equal(log_proba[1].shape, (4, 4))
    # toy regression problem
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        y_hat = reg.fit(X, y).predict(T)
        assert_almost_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    """Test that n_classes_ and classes_ have proper shape."""
    for name, TreeClassifier in CLF_TREES.items():
        # Classification, single output
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_equal(clf.n_classes_, 2)
        assert_array_equal(clf.classes_, [-1, 1])
        # Classification, multi-output
        _y = np.vstack((y, np.array(y) * 2)).T
        clf = TreeClassifier(random_state=0)
        clf.fit(X, _y)
        # With multiple outputs these attributes become per-output lists.
        assert_equal(len(clf.n_classes_), 2)
        assert_equal(len(clf.classes_), 2)
        assert_array_equal(clf.n_classes_, [2, 2])
        assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    """Check class rebalancing."""
    # Truncating iris drops most of class 2; balanced sample weights must
    # let the tree still fit the remaining data perfectly.
    unbalanced_X = iris.data[:125]
    unbalanced_y = iris.target[:125]
    sample_weight = _balance_weights(unbalanced_y)
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
        assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
    """Check that it works no matter the memory layout"""
    # Each entry builds iris.data in a different memory layout.
    layouts = [
        lambda data, dt: np.asarray(data, dtype=dt),              # default
        lambda data, dt: np.asarray(data, order="C", dtype=dt),   # C-order
        lambda data, dt: np.asarray(data, order="F", dtype=dt),   # F-order
        lambda data, dt: np.ascontiguousarray(data, dtype=dt),    # contiguous
    ]
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)
        for make_X in layouts:
            X = make_X(iris.data, dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)
        # Strided (non-contiguous) input.
        X = np.asarray(iris.data[::3], dtype=dtype)
        y = iris.target[::3]
        assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
    """Check sample weighting."""
    # Test that zero-weighted samples are not taken into account
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    sample_weight = np.ones(100)
    sample_weight[y == 0] = 0.0
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # With all class-0 samples weighted to zero, the tree only ever "sees"
    # class 1, so it must predict 1 everywhere.
    assert_array_equal(clf.predict(X), np.ones(100))
    # Test that low weighted samples are not taken into account at low depth
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200
    sample_weight = np.ones(200)
    sample_weight[y == 2] = .51  # Samples of class '2' are still weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # Class 2's total weight (100 * 0.51 = 51) exceeds either other class,
    # so the depth-1 split separates class 2 (x == 200) from the rest.
    assert_equal(clf.tree_.threshold[0], 149.5)
    sample_weight[y == 2] = .5  # Samples of class '2' are no longer weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 49.5)  # Threshold should have moved
    # Test that sample weighting is the same as having duplicates
    X = iris.data
    y = iris.target
    duplicates = rng.randint(0, X.shape[0], 200)
    clf = DecisionTreeClassifier(random_state=1)
    clf.fit(X[duplicates], y[duplicates])
    # bincount(duplicates) gives, for every original sample, the number of
    # times it appears in the duplicated training set.
    sample_weight = np.bincount(duplicates, minlength=X.shape[0])
    clf2 = DecisionTreeClassifier(random_state=1)
    clf2.fit(X, y, sample_weight=sample_weight)
    # Compare only internal (non-leaf) nodes: leaves carry no threshold.
    internal = clf.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(clf.tree_.threshold[internal],
                              clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    """Check sample weighting raises errors."""
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    clf = DecisionTreeClassifier(random_state=0)
    # 2-D weights, 0-d weights, and both length mismatches must be rejected.
    invalid_weights = [
        np.random.rand(100, 1),
        np.array(0),
        np.ones(101),
        np.ones(99),
    ]
    for sample_weight in invalid_weights:
        assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def test_max_leaf_nodes():
    """Test greedy trees with max_depth + 1 leafs. """
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        # Exactly max_leaf_nodes leaves must be grown.
        n_leaves = (fitted.tree_.children_left == TREE_LEAF).sum()
        assert_equal(n_leaves, k + 1)
        # max_leaf_nodes in (0, 1) and non-integers should raise ValueError.
        for bad in (0, 1, 0.1):
            est = TreeEstimator(max_depth=None, max_leaf_nodes=bad)
            assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """Test precedence of max_leaf_nodes over max_depth. """
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        # Growing k leaves needs depth > 1, so max_leaf_nodes must win.
        assert_greater(fitted.tree_.max_depth, 1)
def test_arrays_persist():
    """Ensure property arrays' memory stays alive when tree disappears
    non-regression for #2726
    """
    for attr in ['n_classes', 'value', 'children_left', 'children_right',
                 'threshold', 'impurity', 'feature', 'n_node_samples']:
        # The fitted estimator (and its Tree) becomes garbage immediately
        # after the attribute is fetched; the returned ndarray must keep its
        # underlying buffer alive rather than point into freed memory.
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # if pointing to freed memory, contents may be arbitrary
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    rand_state = check_random_state(0)
    X = np.zeros((10, 20))  # every feature is constant
    y = rand_state.randint(0, 2, (10, ))
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(random_state=0)
        est.fit(X, y)
        # No split is possible, so the tree must remain a single root leaf.
        assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    # One informative column followed by 1000 constant (zero) columns.
    informative = np.array([[1.], [1.], [0.], [0.]])
    X = np.hstack([informative, np.zeros((4, 1000))])
    y = np.array([0., 1., 0., 1.0])
    for name, TreeEstimator in CLF_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        # The informative feature does not separate the classes, so every
        # leaf is an even 50/50 mixture.
        assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
    for name, TreeEstimator in REG_TREES.items():
        est = TreeEstimator(random_state=0, max_features=1)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 1)
        assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    """Test that the error raised for too-large inputs is appropriate.

    Values of 1e40 cannot be represented as float32, so ``fit`` must raise
    a ValueError mentioning "float32".
    """
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
    else:
        # Bug fix: previously the test silently passed when no exception
        # was raised at all; inputs this large must be rejected.
        raise AssertionError("ValueError not raised for overly large input")
def test_memoryerror():
    # _realloc_test requests an allocation that cannot be satisfied; the
    # Cython realloc helper must surface this as a Python MemoryError
    # instead of crashing the process (behavior inferred from the helper's
    # name -- confirm against sklearn/tree/_tree.pyx).
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
| 35.072559 | 79 | 0.613541 |
289bb2720017b343a703824460c57fcba031a7ca
| 792 |
py
|
Python
|
templates/includes/search_form.html.py
|
amigcamel/taipei.py
|
12b24bf50aeeb4c3e0ad1ed1998d6d7e77b845dd
|
[
"MIT"
] | null | null | null |
templates/includes/search_form.html.py
|
amigcamel/taipei.py
|
12b24bf50aeeb4c3e0ad1ed1998d6d7e77b845dd
|
[
"MIT"
] | null | null | null |
templates/includes/search_form.html.py
|
amigcamel/taipei.py
|
12b24bf50aeeb4c3e0ad1ed1998d6d7e77b845dd
|
[
"MIT"
] | null | null | null |
BBBB BBBBBBBBBBBBBB BBBB
XXXXX XXXXXXXXBBB BBBBBBBBX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXXXXX
XXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX gettext('Search') X XXXXXXXXXXX XXXXXXXX XXXXXXXXX
XXXXXX
BB BBBBBBBBBBBBBBBBBBBB
BB BBBBBBBBBBBBBBBBBBBBBBBBBBB BB B
XXXXXX XXXXXXXXXXXXX XXXXXXXXXXX XXXXXXXXX
BBBB
XXXX XXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXX gettext('Everything') XXXXXXXXX
BBB BBBBBBBBBBBBB BBBBB BB BBBBBBBBBBBBBBBBBBBB
XXXXXXX XXXXXXXX
BB BBBBB BB BBBBBBBBBBBBBBBBBBBBXXXXXXXXBBBBBX
XXXXXXXXX
BBBBBB
XXXXXXXXX
XXXXXX
BBBBB
BBBBB
XXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXX XXXXXXX gettext('Go') XX
XXXXXXX
| 27.310345 | 96 | 0.770202 |
68d283b4b3f880a072335673dcd7b5d5881d7814
| 463 |
py
|
Python
|
cpims/cpims/cpims-monthly.py
|
sizler20/cpims_update
|
23b86e40ca779b751383e268ad4fbf6a321ab211
|
[
"MIT"
] | null | null | null |
cpims/cpims/cpims-monthly.py
|
sizler20/cpims_update
|
23b86e40ca779b751383e268ad4fbf6a321ab211
|
[
"MIT"
] | null | null | null |
cpims/cpims/cpims-monthly.py
|
sizler20/cpims_update
|
23b86e40ca779b751383e268ad4fbf6a321ab211
|
[
"MIT"
] | 1 |
2022-02-27T13:36:47.000Z
|
2022-02-27T13:36:47.000Z
|
#!/usr/bin/python3.7
# /home/nmugaya/Projects/2019/Staging/Live/cpims/cpims
# Every Month at 6:45 am
# 45 6 1 * * cd lnk && python3.7 cpims-monthly.py >> ~/cpims-monthly.log 2>&1
from notify import notices
def process_notificaction(report_name):
    """Send the notification batch for the given reporting period.

    Parameters
    ----------
    report_name : str
        Name of the reporting period forwarded to ``notices``
        (e.g. ``'monthly'``).

    Raises
    ------
    Exception
        Anything raised by ``notices`` propagates unchanged to the caller.
    """
    # NOTE: the function name keeps its historical typo ('notificaction')
    # because external callers rely on it.
    # The previous ``try/except Exception as e: raise e / else: pass`` was a
    # behavioral no-op; calling directly is equivalent and preserves the
    # original traceback.
    notices(report_name)
if __name__ == '__main__':
    # Entry point for the monthly cron job (see the crontab line in the
    # file header); output is appended to ~/cpims-monthly.log.
    process_notificaction('monthly')
| 20.130435 | 77 | 0.663067 |
51568b88b34eb4079d921cb16e5ad6a753c08a2b
| 15,507 |
py
|
Python
|
434-MachineLearning/final_project/linearClassifier/sklearn/cluster/mean_shift_.py
|
neale/CS-program
|
6fef9c39579143bde0ab5d1ec5fedc7210e55814
|
[
"Unlicense"
] | 1 |
2016-10-24T13:36:23.000Z
|
2016-10-24T13:36:23.000Z
|
434-MachineLearning/final_project/linearClassifier/sklearn/cluster/mean_shift_.py
|
neale/CS-program
|
6fef9c39579143bde0ab5d1ec5fedc7210e55814
|
[
"Unlicense"
] | null | null | null |
434-MachineLearning/final_project/linearClassifier/sklearn/cluster/mean_shift_.py
|
neale/CS-program
|
6fef9c39579143bde0ab5d1ec5fedc7210e55814
|
[
"Unlicense"
] | null | null | null |
"""Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0,
                       n_jobs=1):
    """Estimate the bandwidth to use with the mean-shift algorithm.

    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it's wise to set the n_samples parameter to a small value.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points.

    quantile : float, default 0.3
        should be between [0, 1]
        0.5 means that the median of all pairwise distances is used.

    n_samples : int, optional
        The number of samples to use. If not given, all samples are used.

    random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.

    Returns
    -------
    bandwidth : float
        The bandwidth parameter.
    """
    random_state = check_random_state(random_state)
    if n_samples is not None:
        # Subsample without replacement to bound the quadratic cost.
        idx = random_state.permutation(X.shape[0])[:n_samples]
        X = X[idx]
    # Bug fix: with a small quantile and/or few samples,
    # int(n_points * quantile) can be 0, which NearestNeighbors rejects
    # with an obscure error.  Use at least one neighbor.
    n_neighbors = int(X.shape[0] * quantile)
    if n_neighbors < 1:
        n_neighbors = 1
    nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
    nbrs.fit(X)
    # Average, over all points, of the distance to the farthest of its
    # n_neighbors nearest neighbors; batched to limit peak memory.
    bandwidth = 0.
    for batch in gen_batches(len(X), 500):
        d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
        bandwidth += np.max(d, axis=1).sum()
    return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
    # For each seed, climb gradient until convergence or max_iter.
    # Returns (center_as_tuple, n_points_within) on success, or None
    # (implicit) when the kernel window becomes empty -- callers must
    # tolerate a None result.
    bandwidth = nbrs.get_params()['radius']
    stop_thresh = 1e-3 * bandwidth  # when mean has converged
    completed_iterations = 0
    while True:
        # Find mean of points within bandwidth
        i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
                                       return_distance=False)[0]
        points_within = X[i_nbrs]
        if len(points_within) == 0:
            break  # Depending on seeding strategy this condition may occur
        my_old_mean = my_mean  # save the old mean
        my_mean = np.mean(points_within, axis=0)
        # If converged or at max_iter, adds the cluster
        if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
                completed_iterations == max_iter):
            return tuple(my_mean), len(points_within)
        completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
               min_bin_freq=1, cluster_all=True, max_iter=300,
               n_jobs=1):
    """Perform mean shift clustering of data using a flat kernel.

    Read more in the :ref:`User Guide <mean_shift>`.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input data.

    bandwidth : float, optional
        Kernel bandwidth.
        If bandwidth is not given, it is determined using a heuristic based on
        the median of all pairwise distances. This will take quadratic time in
        the number of samples. The sklearn.cluster.estimate_bandwidth function
        can be used to do this more efficiently.

    seeds : array-like, shape=[n_seeds, n_features] or None
        Point used as initial kernel locations. If None and bin_seeding=False,
        each data point is used as a seed. If None and bin_seeding=True,
        see bin_seeding.

    bin_seeding : boolean, default=False
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        Ignored if seeds argument is not None.

    min_bin_freq : int, default=1
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds.

    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.

    max_iter : int, default 300
        Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if has not converged yet.

    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.

        .. versionadded:: 0.17
           Parallel Execution using *n_jobs*.

    Returns
    -------
    cluster_centers : array, shape=[n_clusters, n_features]
        Coordinates of cluster centers.

    labels : array, shape=[n_samples]
        Cluster labels for each point.

    Notes
    -----
    See examples/cluster/plot_mean_shift.py for an example.
    """
    if bandwidth is None:
        bandwidth = estimate_bandwidth(X, n_jobs=n_jobs)
    elif bandwidth <= 0:
        raise ValueError("bandwidth needs to be greater than zero or None,\
            got %f" % bandwidth)
    if seeds is None:
        if bin_seeding:
            seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
        else:
            seeds = X
    n_samples, n_features = X.shape
    center_intensity_dict = {}
    nbrs = NearestNeighbors(radius=bandwidth, n_jobs=n_jobs).fit(X)

    # execute iterations on all seeds in parallel
    all_res = Parallel(n_jobs=n_jobs)(
        delayed(_mean_shift_single_seed)
        (seed, X, nbrs, max_iter) for seed in seeds)
    # copy results in a dictionary (seeds whose window emptied return None)
    for i in range(len(seeds)):
        if all_res[i] is not None:
            center_intensity_dict[all_res[i][0]] = all_res[i][1]

    if not center_intensity_dict:
        # nothing near seeds
        raise ValueError("No point was within bandwidth=%f of any seed."
                         " Try a different seeding strategy \
                         or increase the bandwidth."
                         % bandwidth)

    # POST PROCESSING: remove near duplicate points
    # If the distance between two kernels is less than the bandwidth,
    # then we have to remove one because it is a duplicate. Remove the
    # one with fewer points.
    sorted_by_intensity = sorted(center_intensity_dict.items(),
                                 key=lambda tup: tup[1], reverse=True)
    sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
    # Bug fix: np.bool / np.int are deprecated aliases removed in
    # NumPy >= 1.24 (AttributeError there); the builtins are exact
    # equivalents of what these aliases meant.
    unique = np.ones(len(sorted_centers), dtype=bool)
    nbrs = NearestNeighbors(radius=bandwidth,
                            n_jobs=n_jobs).fit(sorted_centers)
    for i, center in enumerate(sorted_centers):
        if unique[i]:
            neighbor_idxs = nbrs.radius_neighbors([center],
                                                  return_distance=False)[0]
            unique[neighbor_idxs] = 0
            unique[i] = 1  # leave the current point as unique
    cluster_centers = sorted_centers[unique]

    # ASSIGN LABELS: a point belongs to the cluster that it is closest to
    nbrs = NearestNeighbors(n_neighbors=1, n_jobs=n_jobs).fit(cluster_centers)
    labels = np.zeros(n_samples, dtype=int)
    distances, idxs = nbrs.kneighbors(X)
    if cluster_all:
        labels = idxs.flatten()
    else:
        labels.fill(-1)
        bool_selector = distances.flatten() <= bandwidth
        labels[bool_selector] = idxs.flatten()[bool_selector]
    return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
    """Finds seeds for mean_shift.

    Seeds are found by snapping every point onto a grid with spacing
    ``bin_size`` and keeping the centers of the bins that received at least
    ``min_bin_freq`` points.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Input points, the same points that will be used in mean_shift.

    bin_size : float
        Controls the coarseness of the binning. Smaller values lead
        to more seeding (which is computationally more expensive). If you're
        not sure how to set this, set it to the value of the bandwidth used
        in clustering.mean_shift.

    min_bin_freq : integer, optional
        Only bins with at least min_bin_freq will be selected as seeds.
        Raising this value decreases the number of seeds found, which
        makes mean_shift computationally cheaper.

    Returns
    -------
    bin_seeds : array-like, shape=[n_samples, n_features]
        Points used as initial kernel positions in clustering.mean_shift.
    """
    # Count how many points fall into each grid cell.
    counts = defaultdict(int)
    for point in X:
        counts[tuple(np.round(point / bin_size))] += 1

    # Keep only the sufficiently populated cells as seed locations.
    seeds = np.array([cell for cell, freq in counts.items()
                      if freq >= min_bin_freq], dtype=np.float32)
    if len(seeds) == len(X):
        # Binning did not reduce the seed count at all; fall back to the
        # raw data points.
        warnings.warn("Binning data failed with provided bin_size=%f,"
                      " using data points as seeds." % bin_size)
        return X
    return seeds * bin_size
class MeanShift(BaseEstimator, ClusterMixin):
    """Mean shift clustering using a flat kernel.
    Mean shift clustering aims to discover "blobs" in a smooth density of
    samples. It is a centroid-based algorithm, which works by updating
    candidates for centroids to be the mean of the points within a given
    region. These candidates are then filtered in a post-processing stage to
    eliminate near-duplicates to form the final set of centroids.
    Seeding is performed using a binning technique for scalability.
    Read more in the :ref:`User Guide <mean_shift>`.
    Parameters
    ----------
    bandwidth : float, optional
        Bandwidth used in the RBF kernel.
        If not given, the bandwidth is estimated using
        sklearn.cluster.estimate_bandwidth; see the documentation for that
        function for hints on scalability (see also the Notes, below).
    seeds : array, shape=[n_samples, n_features], optional
        Seeds used to initialize kernels. If not set,
        the seeds are calculated by clustering.get_bin_seeds
        with bandwidth as the grid size and default values for
        other parameters.
    bin_seeding : boolean, optional
        If true, initial kernel locations are not locations of all
        points, but rather the location of the discretized version of
        points, where points are binned onto a grid whose coarseness
        corresponds to the bandwidth. Setting this option to True will speed
        up the algorithm because fewer seeds will be initialized.
        default value: False
        Ignored if seeds argument is not None.
    min_bin_freq : int, optional
        To speed up the algorithm, accept only those bins with at least
        min_bin_freq points as seeds. If not defined, set to 1.
    cluster_all : boolean, default True
        If true, then all points are clustered, even those orphans that are
        not within any kernel. Orphans are assigned to the nearest kernel.
        If false, then orphans are given cluster label -1.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ : array, [n_samples]
        Labels of each point.
    Notes
    -----
    Scalability:
    Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity will tend
    to O(T*n*log(n)) in lower dimensions, with n the number of samples
    and T the number of points. In higher dimensions the complexity will
    tend towards O(T*n^2).
    Scalability can be boosted by using fewer seeds, for example by using
    a higher value of min_bin_freq in the get_bin_seeds function.
    Note that the estimate_bandwidth function is much less scalable than the
    mean shift algorithm and will be the bottleneck if it is used.
    References
    ----------
    Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
    feature space analysis". IEEE Transactions on Pattern Analysis and
    Machine Intelligence. 2002. pp. 603-619.
    """
    def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
                 min_bin_freq=1, cluster_all=True, n_jobs=1):
        # Parameters are stored verbatim; per sklearn convention no
        # validation happens in __init__ (it happens in fit).
        self.bandwidth = bandwidth
        self.seeds = seeds
        self.bin_seeding = bin_seeding
        self.cluster_all = cluster_all
        self.min_bin_freq = min_bin_freq
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Perform clustering.
        Parameters
        -----------
        X : array-like, shape=[n_samples, n_features]
            Samples to cluster.
        y : ignored
            Present for scikit-learn estimator API compatibility.
        """
        # Validation and all heavy lifting are delegated to the
        # module-level mean_shift function.
        X = check_array(X)
        self.cluster_centers_, self.labels_ = \
            mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
                       min_bin_freq=self.min_bin_freq,
                       bin_seeding=self.bin_seeding,
                       cluster_all=self.cluster_all, n_jobs=self.n_jobs)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape=[n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        # Raises NotFittedError when fit has not been called yet.
        check_is_fitted(self, "cluster_centers_")
        # Each sample is assigned the index of its nearest stored center.
        return pairwise_distances_argmin(X, self.cluster_centers_)
| 37.729927 | 78 | 0.663249 |
80753dd39ade80323e44fc0b3a4858fb49d7a5fd
| 1,216 |
py
|
Python
|
flask/boxwise_flask/exceptions.py
|
boxwise/boxtribute
|
b87d3bf52e29cb485d98e669b4b43d1934abf310
|
[
"Apache-2.0"
] | 3 |
2020-10-17T06:37:10.000Z
|
2021-06-08T16:58:38.000Z
|
flask/boxwise_flask/exceptions.py
|
boxwise/boxtribute
|
b87d3bf52e29cb485d98e669b4b43d1934abf310
|
[
"Apache-2.0"
] | 166 |
2020-10-25T20:45:32.000Z
|
2022-03-28T08:18:26.000Z
|
flask/boxwise_flask/exceptions.py
|
boxwise/boxtribute
|
b87d3bf52e29cb485d98e669b4b43d1934abf310
|
[
"Apache-2.0"
] | 4 |
2021-01-01T18:03:57.000Z
|
2022-03-10T08:43:23.000Z
|
class AuthenticationFailed(Exception):
    """Raised for authentication failures at the web-API (Flask) level.

    Carries the error payload and the HTTP status code that the endpoint
    should respond with.
    """

    def __init__(self, error, status_code):
        self.status_code = status_code
        self.error = error
# Custom exceptions to be raised in GraphQL resolver functions
# cf. https://github.com/mirumee/ariadne/issues/339#issuecomment-604380881
# Re-use error codes proposed by Apollo-Server
# cf. https://www.apollographql.com/docs/apollo-server/data/errors/#error-codes
class UnknownResource(Exception):
    """GraphQL resolver error: the requested resource type is not known.

    ``extensions`` follows the Apollo-Server error-code convention and is
    attached to the GraphQL error payload.
    """

    extensions = {
        "code": "INTERNAL_SERVER_ERROR",
        "description": "This resource is not known",
    }
class Forbidden(Exception):
    """GraphQL resolver error: the user may not access the given resource."""

    def __init__(self, resource, value, user, *args, **kwargs):
        description = ("You don't have access to the resource "
                       "{}={}".format(resource, value))
        self.extensions = {
            "code": "FORBIDDEN",
            "description": description,
            "user": user,
        }
        super().__init__(*args, **kwargs)
class RequestedResourceNotFound(Exception):
    """GraphQL resolver error: the database lookup returned no match."""

    extensions = {
        "code": "BAD_USER_INPUT",
        "description": "The requested resource does not exist in the database.",
    }
| 32 | 80 | 0.65625 |
bb766f07bff6a9c1a8229a4918557448fd8470c5
| 734 |
py
|
Python
|
make/photon/prepare/g.py
|
ckd/harbor
|
1ceb7a2fb9512c77deac97def51d875d60a7bf55
|
[
"Apache-2.0"
] | 2 |
2017-06-07T09:34:22.000Z
|
2019-04-17T08:12:57.000Z
|
make/photon/prepare/g.py
|
ckd/harbor
|
1ceb7a2fb9512c77deac97def51d875d60a7bf55
|
[
"Apache-2.0"
] | 2 |
2022-03-02T05:03:32.000Z
|
2022-03-17T22:25:26.000Z
|
make/photon/prepare/g.py
|
ckd/harbor
|
1ceb7a2fb9512c77deac97def51d875d60a7bf55
|
[
"Apache-2.0"
] | 1 |
2019-04-22T04:39:14.000Z
|
2019-04-22T04:39:14.000Z
|
import os
from pathlib import Path
## Const
DEFAULT_UID = 10000
DEFAULT_GID = 10000

## Global variable
# Mount points inside the Harbor "prepare" container.
base_dir = '/harbor_make'
templates_dir = "/usr/src/app/templates"
config_dir = '/config'
secret_dir = '/secret'
secret_key_dir = '/secret/keys'

# 'old_' prefix suggests a legacy layout under /config -- confirm against
# the migration code that consumes these paths.
old_private_key_pem_path = Path('/config/core/private_key.pem')
old_crt_path = Path('/config/registry/root.crt')
private_key_pem_path = Path('/secret/core/private_key.pem')
root_crt_path = Path('/secret/registry/root.crt')

config_file_path = '/compose_location/harbor.yml'
input_config_path = '/input/harbor.yml'
versions_file_path = Path('/usr/src/app/versions')

# Certificate directories derived from the config mount.
cert_dir = os.path.join(config_dir, "nginx", "cert")
core_cert_dir = os.path.join(config_dir, "core", "certificates")
650491bcb7cdee98b393ce95583ad7bdd5b78365
| 4,451 |
py
|
Python
|
app/src/main/python/main.py
|
CompNeurosurg/sezect
|
0a01b8ff1c5de1cecaeca120d6c895393692482e
|
[
"MIT"
] | null | null | null |
app/src/main/python/main.py
|
CompNeurosurg/sezect
|
0a01b8ff1c5de1cecaeca120d6c895393692482e
|
[
"MIT"
] | null | null | null |
app/src/main/python/main.py
|
CompNeurosurg/sezect
|
0a01b8ff1c5de1cecaeca120d6c895393692482e
|
[
"MIT"
] | null | null | null |
from six.moves import input
import pandas as pd
import numpy as np
from os.path import dirname, join
from android.os import Environment
import sklearn
# from sklearn.externals import joblib
import _multiprocessing
_multiprocessing.sem_unlink = None
import joblib
import time
# import matplotlib.pyplot as plt
def main():
    """Run the on-device seizure-detection pipeline.

    Loads a preprocessed 19-channel EEG CSV from external storage, extracts
    SDI and matrix-determinant features per 8-second window (wl = 1024
    samples at fs = 128 Hz, 50% overlap), classifies the features with a
    joblib-pickled classifier (an SVM, per the filename), and prints a
    per-channel seizure summary.
    """
    # a, b appear to be leftover debug values (unused below).
    a = 30
    b = 6
    # print('Addition: ', a + b)
    # EEG = np.random.rand(19,5000)
    d = str(Environment.getExternalStorageDirectory())
    # print (d)
    # file = 'src/main/python'
    file = d
    # f = open(join(dirname(__file__),'graph1.txt'))
    # file = '/storage/emulated/0/'
    print('<-> Process started... <->')
    # print(' ')
    # filename = d+'/EEG_Sub_02_Preprocessed.csv'
    # print (filename)
    # EEG1 = pd.read_csv(join(dirname(__file__),'LAWRENCE_RMCH_Preprocessed_new_half.csv'))
    EEG1 = pd.read_csv(d+'/LAWRENCE_RMCH_Preprocessed_new.csv', engine='python')
    EEG = np.array(EEG1)
    # Transpose so rows are channels: EEG becomes (19, n_samples).
    EEG = EEG.T
    # 10-20 system electrode names, in row order of the CSV.
    channel = ['Fp1', 'Fp2', 'F7', 'F3', 'Fz', 'F4', 'F8', 'T3', 'C3', 'Cz', 'C4', 'T4',
    'T5', 'P3', 'Pz', 'P4', 'T6', 'O1', 'O2']
    # print(EEG.shape)
    fs = 128
    print('Number of channels: 19')
    print('Sampling frequency:', fs, 'Hz')
    print('Duration of EEG data:', round((EEG.shape[1])/(fs*60)), 'minutes')
    # print(' ')
    # Window length (1024 samples = 8 s) and hop size (50% overlap).
    wl = 256*4
    wl1 = wl*0.5
    # print(' ')
    print('<-> Extracting features... <->')
    # print(' ')
    start = time.time()
    SDI_feature = np.empty((19, round((EEG.shape[1])/wl1)))
    MD_feature = np.empty((19, round((EEG.shape[1])/wl1)))
    for i in range(1, round((EEG.shape[1])/wl1)-2):
        x1 = round(wl1*(i-1)+1)
        x2 = x1+wl
        for k in range(EEG.shape[0]):
            # NOTE(review): seg_EEG is a *view* into EEG and SDI mutates its
            # input in place, so matDet below sees SDI-modified data --
            # confirm this ordering is intentional.
            seg_EEG = EEG[k,x1:x2]
            SDI_feature[k][i] = SDI(seg_EEG)
            MD_feature[k][i] = matDet(seg_EEG)
    # print(SDI_feature.shape)
    # print(MD_feature.shape)
    print('<-> Classifying the features... <->')
    # print(' ')
    svclassifier_from_pickle = joblib.load(join(dirname(__file__),'SVM_cross_DB.pkl'))
    predict_output = np.empty((19,MD_feature.shape[1]-1))
    # Global medians used to re-center each channel's features
    # (presumably fitted on the training database -- confirm).
    M_global_SDI = 4.4
    M_global_Det = 64.8
    for i in range(MD_feature.shape[0]):
        lambda_test_SDI = M_global_SDI - np.median(SDI_feature[i, 1:])
        ydata_SDI = lambda_test_SDI + SDI_feature[i, 1:]
        lambda_test_MD = M_global_Det-np.median(MD_feature[i, 1:])
        ydata_MD = lambda_test_MD + MD_feature[i, 1:]
        X_test = np.transpose([ydata_SDI, ydata_MD])
        pred = svclassifier_from_pickle.predict(X_test)
        # Smooth predictions over 7 windows, then threshold at 0.5.
        df = pd.DataFrame(pred)
        smooth_pred = df.rolling(window = 7).mean()
        smooth_pred = smooth_pred.fillna(1)
        smooth_pred = np.array(smooth_pred)
        # i= channel, j=column
        for j in range(len(smooth_pred)):
            if smooth_pred[j] >= 0.5:
                predict_output[i][j] = 1
            else:
                predict_output[i][j] = 0
    # print(predict_output.shape)
    end = time.time()
    print('Elapsed time:', round(end - start), 'seconds')
    # NOTE(review): printed unconditionally, even when no channel shows a
    # seizure -- confirm intended.
    print('Seizure detected: Yes')
    # print('Seizure detected channels are: \n', channel)
    # print('Number of seizure detected:', 3)
    print('Channel Number of seizures')
    j=0
    nr_seizures = np.empty((19,1))
    for ch in range(len(predict_output)):
        x = predict_output[ch,:]
        # Count rising edges (0 -> 1 transitions) = seizure onsets.
        nr_seizures[ch] = np.sum((x[1:]-x[:-1]) > 0)
        if nr_seizures[ch] != 0:
            j +=1
            print(channel [ch], ' ', int(nr_seizures[ch]))
    print('Total number of seizure epochs:', round(np.sum(nr_seizures)))
    # Heuristic: seizures on few channels => focal, on many => generalized.
    if j <=4:
        print('Type: Focal seizure')
    else:
        print('Type: Generalized seizure')
    # ch = 2
    # fig, ax = plt.subplots(figsize=(30,5))
    # plt.subplot(2,1,1)
    # plt.plot(EEG[ch,:])
    #
    # plt.subplot(2,1,2)
    # plt.plot(predict_output[ch,:])
def SDI(x):
    """Compute a decomposition-based feature vector from a 1-D EEG segment.

    NOTE(review): ``y = x`` aliases the input array, and the in-place writes
    below therefore mutate the caller's data (a view into the full EEG array
    in ``main``) -- confirm this side effect is intentional.
    """
    y = x
    N = len(x)
    # Rebinds x to a *new* array of absolute values; y still aliases the input.
    x = abs(x)
    L = 10
    # Haar-like averaging (x) / differencing (y) cascade, L-1 levels deep.
    for k in range(L-1):
        j = 0
        for i in range(0,len(x)-1,2):
            j = j+1;
            x[j] = (x[i]+x[i+1])/2
            y[j] = (y[i]-y[i+1])/2
        # Element 0 is dropped each level -- presumably an artifact of
        # porting 1-based indexing; confirm against the reference code.
        x = x[1:round(len(x)/2)]
        y = y[1:round(len(y)/2)]
    a = x
    s = y
    aa = (a+s)/2
    ss = (a-s)/2
    # Log-scaled combination of approximation and detail coefficients.
    decomp = np.log10((N/L)*(a*aa-ss*s))
    return decomp
def matDet(x):
mat = np.reshape(np.multiply(x, x), (32,32))
d = np.linalg.det(mat)
if d == 0:
d = 10
return(np.log10(abs(d/(32*32))))
| 31.34507 | 91 | 0.565042 |
8ab5551b15eec2789257b78b8536f066f2fa3153
| 1,488 |
py
|
Python
|
data/p2DJ/New/R2/benchmark/startCirq6.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startCirq6.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
data/p2DJ/New/R2/benchmark/startCirq6.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=4
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed benchmark circuit and append a measurement.

    NOTE(review): the ``n`` parameter is unused -- the gate list below is
    auto-generated (see the '# number=' tags) and does not depend on it.
    """
    c = cirq.Circuit()  # circuit begin

    c.append(cirq.H.on(input_qubit[0]))  # number=1
    # The two identical CNOTs cancel (CNOT is self-inverse); presumably kept
    # as generated for the equivalence benchmark.
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=2
    c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0]))  # number=3
    # circuit end

    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render a sequence of bit values as a string of '0'/'1' digits."""
    digits = [str(int(b)) for b in bits]
    return "".join(digits)
if __name__ == '__main__':
    # Four qubits in one GridQubit column; only the first two carry gates.
    qubit_count = 4
    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    # Compile to the Sycamore sqrt-iSWAP gate set.
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)
    # Histogram of measurement outcomes keyed by their bitstring.
    frequencies = result.histogram(key='result', fold_func=bitstring)
    # NOTE(review): plain open/close with no try/finally -- a 'with' block
    # would guarantee the file is closed on error.
    writefile = open("../data/startCirq6.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)
    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
    writefile.close()
| 25.655172 | 77 | 0.700941 |
ce04fd03fb7f7f27c85504de9df357274f232e95
| 2,377 |
py
|
Python
|
src/lib/centipede/Dispatcher/Renderfarm/RenderfarmJob/ExpandedJob.py
|
paulondc/centipede
|
6000a6964c2ce4a1f9c5ba0fac1d5ab0fead1fbe
|
[
"MIT"
] | null | null | null |
src/lib/centipede/Dispatcher/Renderfarm/RenderfarmJob/ExpandedJob.py
|
paulondc/centipede
|
6000a6964c2ce4a1f9c5ba0fac1d5ab0fead1fbe
|
[
"MIT"
] | null | null | null |
src/lib/centipede/Dispatcher/Renderfarm/RenderfarmJob/ExpandedJob.py
|
paulondc/centipede
|
6000a6964c2ce4a1f9c5ba0fac1d5ab0fead1fbe
|
[
"MIT"
] | null | null | null |
import os
import uuid
from .RenderfarmJob import RenderfarmJob
class ExpandedJob(RenderfarmJob):
    """
    Render farm job used to actually run a task on the farm.

    Execution of the task may be split into chunks; when a task cannot
    be divided, the whole task runs as a single chunk.
    """

    def __init__(self, *args, **kwargs):
        """
        Create an expanded job with empty chunk bookkeeping.
        """
        super(ExpandedJob, self).__init__(*args, **kwargs)

        self.__chunkSize = 0
        self.__currentChunk = 0
        self.__chunkTotal = 0
        self.__totalInChunk = 0
        self.__taskResultFilePath = None

    def taskResultFilePath(self):
        """
        Return the file path where the task result gets serialized.

        The path is generated lazily on first access and cached, so the
        same job always reports the same result file.
        """
        if self.__taskResultFilePath is None:
            resultName = "result_{}.json".format(str(uuid.uuid1()))
            self.__taskResultFilePath = os.path.join(
                self.jobDirectory(),
                resultName
            )
        return self.__taskResultFilePath

    def setChunkSize(self, chunkSize):
        """
        Set the chunk size used to split the task.
        """
        self.__chunkSize = chunkSize

    def chunkSize(self):
        """
        Return the chunk size used to split the task.
        """
        return self.__chunkSize

    def setTotalInChunk(self, totalInChunk):
        """
        Set how many crawlers are in the current chunk.
        """
        self.__totalInChunk = totalInChunk

    def totalInChunk(self):
        """
        Return how many crawlers are in the current chunk.
        """
        return self.__totalInChunk

    def setCurrentChunk(self, currentChunk):
        """
        Set the index of the chunk currently being processed.
        """
        self.__currentChunk = currentChunk

    def currentChunk(self):
        """
        Return the index of the chunk currently being processed.
        """
        return self.__currentChunk

    def setChunkTotal(self, chunkTotal):
        """
        Set the total number of chunks for the job.
        """
        self.__chunkTotal = chunkTotal

    def chunkTotal(self):
        """
        Return the total number of chunks for the job.
        """
        return self.__chunkTotal
| 27.321839 | 90 | 0.595709 |
dd14536f8a54d69fff054ebae286a352d8d19383
| 3,337 |
py
|
Python
|
src/compas_ghpython/artists/meshartist.py
|
kathrindoerfler/compas
|
e876b36b582ee055da673befca1b7ced3834090c
|
[
"MIT"
] | null | null | null |
src/compas_ghpython/artists/meshartist.py
|
kathrindoerfler/compas
|
e876b36b582ee055da673befca1b7ced3834090c
|
[
"MIT"
] | 9 |
2019-09-11T08:53:19.000Z
|
2019-09-16T08:35:39.000Z
|
src/compas_ghpython/artists/meshartist.py
|
Licini/compas
|
34f65adb3d0abc3f403312ffba62aa76f3376292
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import compas_ghpython
from compas_ghpython.artists.mixins import EdgeArtist
from compas_ghpython.artists.mixins import FaceArtist
from compas_ghpython.artists.mixins import VertexArtist
from compas.geometry import centroid_polygon
from compas.utilities import pairwise
__all__ = ['MeshArtist']
class MeshArtist(FaceArtist, EdgeArtist, VertexArtist):
    """Artist for visualising COMPAS meshes in GhPython.

    Parameters
    ----------
    mesh : compas.datastructures.Mesh
        A COMPAS mesh.

    Attributes
    ----------
    defaults : dict
        Default settings for color, scale, tolerance, ...

    Examples
    --------
    .. code-block:: python

        import compas
        from compas.datastructures import Mesh
        from compas_ghpython.artists import MeshArtist

        mesh = Mesh.from_obj(compas.get('faces.obj'))
        artist = MeshArtist(mesh)
        artist.draw_faces(join_faces=True)
        artist.draw_vertices(color={key: '#ff0000' for key in mesh.vertices_on_boundary()})
        artist.draw_edges()
    """

    def __init__(self, mesh):
        self.mesh = mesh
        # default drawing colors, keyed by element type
        self.defaults = {
            'color.vertex': (255, 255, 255),
            'color.edge': (0, 0, 0),
            'color.face': (210, 210, 210),
        }

    @property
    def mesh(self):
        """compas.datastructures.Mesh: The mesh that should be painted."""
        return self.datastructure

    @mesh.setter
    def mesh(self, mesh):
        self.datastructure = mesh

    def draw(self, color=None):
        """Deprecated. Use ``draw_mesh()``"""
        # NOTE: This warning should be triggered with warnings.warn(), not be a print statement, but GH completely ignores that
        print('MeshArtist.draw() is deprecated: please use draw_mesh() instead')
        return self.draw_mesh(color)

    def draw_mesh(self, color=None):
        """Draw the mesh as a Rhino mesh made of triangle and quad faces only."""
        key_index = self.mesh.key_index()
        vertices = self.mesh.vertices_attributes('xyz')
        quads = []
        for fkey in self.mesh.faces():
            face = [key_index[key] for key in self.mesh.face_vertices(fkey)]
            corners = len(face)
            if corners == 3:
                # pad a triangle into a degenerate quad by repeating the last vertex
                quads.append(face + [face[-1]])
            elif corners == 4:
                quads.append(face)
            elif corners > 4:
                # ngon: fan out degenerate quads around an appended centroid vertex
                centroid = len(vertices)
                vertices.append(centroid_polygon(
                    [vertices[index] for index in face]))
                for a, b in pairwise(face + face[0:1]):
                    quads.append([centroid, a, b, b])
            # faces with fewer than 3 corners are silently skipped
        return compas_ghpython.draw_mesh(vertices, quads, color)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
    from compas.datastructures import Mesh
    from compas.geometry import Polyhedron
    # Build a dodecahedron mesh and draw its vertices, faces and edges.
    poly = Polyhedron.generate(12)
    mesh = Mesh.from_vertices_and_faces(poly.vertices, poly.faces)
    artist = MeshArtist(mesh)
    vertices = artist.draw_vertices()
    faces = artist.draw_faces()
    edges = artist.draw_edges()
| 30.614679 | 127 | 0.597842 |
255ceacedfbbf5c33e57ee93710311cfd49a499c
| 29,383 |
py
|
Python
|
tests/common.py
|
niulinlnc/home-assistant
|
135e5ea15d482daf444afb96bbb290170c51f56e
|
[
"Apache-2.0"
] | null | null | null |
tests/common.py
|
niulinlnc/home-assistant
|
135e5ea15d482daf444afb96bbb290170c51f56e
|
[
"Apache-2.0"
] | null | null | null |
tests/common.py
|
niulinlnc/home-assistant
|
135e5ea15d482daf444afb96bbb290170c51f56e
|
[
"Apache-2.0"
] | null | null | null |
"""Test the helper method for writing tests."""
import asyncio
import functools as ft
import json
import logging
import os
import sys
import threading
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
from io import StringIO
from unittest.mock import MagicMock, Mock, patch
import homeassistant.util.dt as date_util
import homeassistant.util.yaml as yaml
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
models as auth_models, auth_store, providers as auth_providers,
permissions as auth_permissions)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import mqtt, recorder
from homeassistant.config import async_process_component_config
from homeassistant.const import (
ATTR_DISCOVERED, ATTR_SERVICE, DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE, EVENT_PLATFORM_DISCOVERED, EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED, SERVER_PORT, STATE_ON, STATE_OFF)
from homeassistant.helpers import (
area_registry, device_registry, entity, entity_platform, entity_registry,
intent, restore_state, storage)
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.unit_system import METRIC_SYSTEM
from homeassistant.util.async_ import (
run_callback_threadsafe, run_coroutine_threadsafe)
_TEST_INSTANCE_PORT = SERVER_PORT
_LOGGER = logging.getLogger(__name__)
INSTANCES = []
CLIENT_ID = 'https://example.com/app'
CLIENT_REDIRECT_URI = 'https://example.com/app/callback'
def threadsafe_callback_factory(func):
    """Wrap a callback so it can be invoked from any thread.

    The wrapped callback must take ``hass`` as its first argument; the
    call is scheduled on the hass event loop and the result waited on.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the callback on the event loop and block for the result."""
        hass = args[0]
        future = run_callback_threadsafe(
            hass.loop, ft.partial(func, *args, **kwargs))
        return future.result()
    return wrapper
def threadsafe_coroutine_factory(func):
    """Wrap a coroutine function so it can be called from any thread.

    The wrapped coroutine must take ``hass`` as its first argument; it is
    submitted to the hass event loop and the result waited on.
    """
    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Submit the coroutine to the event loop and block for the result."""
        hass = args[0]
        future = run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()
    return wrapper
def get_test_config_dir(*add_path):
    """Return the path to the test configuration directory.

    Any extra path segments are joined below the base directory.
    """
    base = os.path.dirname(__file__)
    return os.path.join(base, 'testing_config', *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory."""
    if sys.platform == "win32":
        # Windows needs the proactor loop for subprocess/pipe support.
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
    hass = loop.run_until_complete(async_test_home_assistant(loop))
    stop_event = threading.Event()
    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        # Signal stop_hass() that the loop has fully exited.
        stop_event.set()
    orig_stop = hass.stop
    def start_hass(*mocks):
        """Start hass."""
        run_coroutine_threadsafe(hass.async_start(), loop=hass.loop).result()
    def stop_hass():
        """Stop hass."""
        orig_stop()
        # Wait until run_loop() has returned before closing the loop object.
        stop_event.wait()
        loop.close()
    hass.start = start_hass
    hass.stop = stop_hass
    # The event loop runs in a dedicated thread; the test thread drives it
    # through the threadsafe start/stop wrappers installed above.
    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()
    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop):
    """Return a Home Assistant object pointing at test config dir."""
    hass = ha.HomeAssistant(loop)
    hass.config.async_load = Mock()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)
    # Keep the original schedulers so non-Mock targets behave normally.
    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task
    def async_add_job(target, *args):
        """Add job."""
        # Mocks are not awaitable; wrap their call result in a fake coroutine.
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_job(target, *args)
    def async_add_executor_job(target, *args):
        """Add executor job."""
        if isinstance(target, Mock):
            return mock_coro(target(*args))
        return orig_async_add_executor_job(target, *args)
    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock):
            return mock_coro()
        return orig_async_create_task(coroutine)
    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    # Fixed test configuration so tests are deterministic.
    hass.config.location_name = 'test home'
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone('US/Pacific')
    hass.config.units = METRIC_SYSTEM
    hass.config.skip_pip = True
    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = []
    hass.config_entries._store._async_ensure_stop_listener = lambda: None
    hass.state = ha.CoreState.running
    # Mock async_start
    orig_start = hass.async_start
    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch('homeassistant.core._async_create_timer'), \
                patch.object(hass, 'async_stop_track_tasks'):
            await orig_start()
    hass.async_start = mock_async_start
    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)
    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)
    return hass
def get_test_instance_port():
    """Return an unused port for running a test instance.

    The socket holding the default port is not always released when HA
    stops in another test case, so each call hands out a fresh port by
    bumping a module-level counter.
    """
    global _TEST_INSTANCE_PORT
    _TEST_INSTANCE_PORT = _TEST_INSTANCE_PORT + 1
    return _TEST_INSTANCE_PORT
@ha.callback
def async_mock_service(hass, domain, service, schema=None):
    """Register a fake service and return the list of recorded calls."""
    calls = []

    @ha.callback
    def record_call(call):
        """Store the service call for later inspection."""
        calls.append(call)

    hass.services.async_register(
        domain, service, record_call, schema=schema)
    return calls


mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler and return the list of handled intents."""
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        """Intent handler that records every intent it handles."""
        intent_type = intent_typ

        # Fix: use a native coroutine instead of the deprecated
        # @asyncio.coroutine decorator (removed in Python 3.11).
        async def async_handle(self, intent):
            """Handle the intent."""
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())
    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Inject an incoming MQTT message into the mqtt component."""
    if isinstance(payload, str):
        # the mqtt stack works on raw bytes
        payload = payload.encode('utf-8')
    message = mqtt.Message(topic, payload, qos, retain)
    hass.data['mqtt']._mqtt_handle_message(message)


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, time):
    """Fire an EVENT_TIME_CHANGED event carrying *time* converted to UTC."""
    event_data = {'now': date_util.as_utc(time)}
    hass.bus.async_fire(EVENT_TIME_CHANGED, event_data)


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def fire_service_discovered(hass, service, info):
    """Fire a platform-discovered event on the bus."""
    event_data = {
        ATTR_SERVICE: service,
        ATTR_DISCOVERED: info,
    }
    hass.bus.fire(EVENT_PLATFORM_DISCOVERED, event_data)
def load_fixture(filename):
    """Return the contents of a fixture file as a string."""
    path = os.path.join(os.path.dirname(__file__), 'fixtures', filename)
    with open(path, encoding='utf-8') as fixture_file:
        return fixture_file.read()
def mock_state_change_event(hass, new_state, old_state=None):
    """Fire a mocked state-changed event on the bus."""
    payload = {
        'entity_id': new_state.entity_id,
        'new_state': new_state,
    }
    if old_state:
        payload['old_state'] = old_state
    hass.bus.fire(EVENT_STATE_CHANGED, payload, context=new_state.context)
async def async_mock_mqtt_component(hass, config=None):
    """Mock the MQTT component."""
    if config is None:
        config = {mqtt.CONF_BROKER: 'mock-broker'}
    # Patch the paho client so setup succeeds without a real broker;
    # 0 / (0, 0) are paho's success return codes.
    with patch('paho.mqtt.client.Client') as mock_client:
        mock_client().connect.return_value = 0
        mock_client().subscribe.return_value = (0, 0)
        mock_client().unsubscribe.return_value = (0, 0)
        mock_client().publish.return_value = (0, 0)
        result = await async_setup_component(hass, mqtt.DOMAIN, {
            mqtt.DOMAIN: config
        })
        assert result
        await hass.async_block_till_done()
        # Wrap the live component in a spy so tests can assert on calls.
        hass.data['mqtt'] = MagicMock(spec_set=hass.data['mqtt'],
                                      wraps=hass.data['mqtt'])
        return hass.data['mqtt']
mock_mqtt_component = threadsafe_coroutine_factory(async_mock_mqtt_component)
@ha.callback
def mock_component(hass, component):
    """Mark a component as set up without running its setup.

    Raises AssertionError when the component was already set up.  The
    original code constructed the AssertionError but never raised it,
    so the duplicate-setup guard silently did nothing.
    """
    if component in hass.config.components:
        raise AssertionError(
            "Component {} is already setup".format(component))
    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Install a mocked Entity Registry on hass and return it."""
    reg = entity_registry.EntityRegistry(hass)
    # falsy (None or empty) entries are replaced with a fresh OrderedDict
    reg.entities = mock_entries or OrderedDict()
    hass.data[entity_registry.DATA_REGISTRY] = reg
    return reg
def mock_area_registry(hass, mock_entries=None):
    """Install a mocked Area Registry on hass and return it."""
    reg = area_registry.AreaRegistry(hass)
    # falsy (None or empty) entries are replaced with a fresh OrderedDict
    reg.areas = mock_entries or OrderedDict()
    hass.data[area_registry.DATA_REGISTRY] = reg
    return reg
def mock_device_registry(hass, mock_entries=None):
    """Install a mocked Device Registry on hass and return it."""
    reg = device_registry.DeviceRegistry(hass)
    # falsy (None or empty) entries are replaced with a fresh OrderedDict
    reg.devices = mock_entries or OrderedDict()
    hass.data[device_registry.DATA_REGISTRY] = reg
    return reg
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""
    def __init__(self, id=None, name='Mock Group',
                 policy=system_policies.ADMIN_POLICY):
        """Mock a group."""
        kwargs = {
            'name': name,
            'policy': policy,
        }
        # only forward 'id' when given so the base class can auto-generate one
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)
    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)
    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to the given auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        # inject directly into the private store, bypassing async APIs
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""
    def __init__(self, id=None, is_owner=False, is_active=True,
                 name='Mock User', system_generated=False, groups=None):
        """Initialize mock user."""
        kwargs = {
            'is_owner': is_owner,
            'is_active': is_active,
            'name': name,
            'system_generated': system_generated,
            'groups': groups or [],
            # no permission lookup wired up by default
            'perm_lookup': None,
        }
        # only forward 'id' when given so the base class can auto-generate one
        if id is not None:
            kwargs['id'] = id
        super().__init__(**kwargs)
    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        return self.add_to_auth_manager(hass.auth)
    def add_to_auth_manager(self, auth_mgr):
        """Test helper to add entry to the given auth manager's store."""
        ensure_auth_manager_loaded(auth_mgr)
        # inject directly into the private store, bypassing async APIs
        auth_mgr._store._users[self.id] = self
        return self
    def mock_policy(self, policy):
        """Mock a policy for a user."""
        self._permissions = auth_permissions.PolicyPermissions(
            policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Build an auth provider from *config* and register it on hass.auth."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config)
    assert provider is not None, 'Invalid config specified'
    registry = hass.auth._providers
    provider_key = (provider.type, provider.id)
    if provider_key in registry:
        raise ValueError('Provider already registered')
    registry[provider_key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Populate the auth store defaults so the manager counts as loaded."""
    auth_store = auth_mgr._store
    if auth_store._users is None:
        auth_store._set_defaults()
class MockModule:
    """Representation of a fake module."""
    # pylint: disable=invalid-name
    def __init__(self, domain=None, dependencies=None, setup=None,
                 requirements=None, config_schema=None, platform_schema=None,
                 platform_schema_base=None, async_setup=None,
                 async_setup_entry=None, async_unload_entry=None,
                 async_migrate_entry=None, async_remove_entry=None):
        """Initialize the mock module."""
        # mimic the attributes of a real integration module
        self.__name__ = 'homeassistant.components.{}'.format(domain)
        self.__file__ = 'homeassistant/components/{}'.format(domain)
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # optional hooks are only attached when explicitly provided
        if config_schema is not None:
            self.CONFIG_SCHEMA = config_schema
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if platform_schema_base is not None:
            self.PLATFORM_SCHEMA_BASE = platform_schema_base
        if setup is not None:
            # We run this in executor, wrap it in function
            self.setup = lambda *args: setup(*args)
        if async_setup is not None:
            self.async_setup = async_setup
        # fall back to a no-op successful async setup
        if setup is None and async_setup is None:
            self.async_setup = mock_coro_func(True)
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        if async_unload_entry is not None:
            self.async_unload_entry = async_unload_entry
        if async_migrate_entry is not None:
            self.async_migrate_entry = async_migrate_entry
        if async_remove_entry is not None:
            self.async_remove_entry = async_remove_entry
class MockPlatform:
    """Provide a fake platform."""
    # mimic the attributes of a real platform module
    __name__ = 'homeassistant.components.light.bla'
    __file__ = 'homeassistant/components/blah/light'
    # pylint: disable=invalid-name
    def __init__(self, setup_platform=None, dependencies=None,
                 platform_schema=None, async_setup_platform=None,
                 async_setup_entry=None, scan_interval=None):
        """Initialize the platform."""
        self.DEPENDENCIES = dependencies or []
        # optional hooks are only attached when explicitly provided
        if platform_schema is not None:
            self.PLATFORM_SCHEMA = platform_schema
        if scan_interval is not None:
            self.SCAN_INTERVAL = scan_interval
        if setup_platform is not None:
            # We run this in executor, wrap it in function
            self.setup_platform = lambda *args: setup_platform(*args)
        if async_setup_platform is not None:
            self.async_setup_platform = async_setup_platform
        if async_setup_entry is not None:
            self.async_setup_entry = async_setup_entry
        # fall back to a no-op async platform setup
        if setup_platform is None and async_setup_platform is None:
            self.async_setup_platform = mock_coro_func()
class MockEntityPlatform(entity_platform.EntityPlatform):
    """Mock class with some mock defaults."""
    def __init__(
        self, hass,
        logger=None,
        domain='test_domain',
        platform_name='test_platform',
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
        async_entities_added_callback=lambda: None
    ):
        """Initialize a mock entity platform."""
        if logger is None:
            logger = logging.getLogger('homeassistant.helpers.entity_platform')
        # Otherwise the constructor will blow up.
        # A bare Mock platform has a Mock PARALLEL_UPDATES, which the base
        # class cannot interpret as a number, so force it to 0.
        if (isinstance(platform, Mock) and
                isinstance(platform.PARALLEL_UPDATES, Mock)):
            platform.PARALLEL_UPDATES = 0
        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
            async_entities_added_callback=async_entities_added_callback,
        )
class MockToggleDevice(entity.ToggleEntity):
    """Provide a mock toggle device."""
    def __init__(self, name, state):
        """Initialize the mock device."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        # log of (method_name, kwargs) tuples recorded by every accessor
        self.calls = []
    @property
    def name(self):
        """Return the name of the device if any."""
        self.calls.append(('name', {}))
        return self._name
    @property
    def state(self):
        """Return the state of the device if any."""
        self.calls.append(('state', {}))
        return self._state
    @property
    def is_on(self):
        """Return true if device is on."""
        self.calls.append(('is_on', {}))
        return self._state == STATE_ON
    def turn_on(self, **kwargs):
        """Turn the device on."""
        self.calls.append(('turn_on', kwargs))
        self._state = STATE_ON
    def turn_off(self, **kwargs):
        """Turn the device off."""
        self.calls.append(('turn_off', kwargs))
        self._state = STATE_OFF
    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        try:
            # most recent call matching the requested method
            return next(call for call in reversed(self.calls)
                        if call[0] == method)
        except StopIteration:
            return None
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(self, *, domain='test', data=None, version=1, entry_id=None,
                 source=config_entries.SOURCE_USER, title='Mock Title',
                 state=None, options=None,
                 connection_class=config_entries.CONN_CLASS_UNKNOWN):
        """Initialize a mock config entry.

        Fix: ``options`` previously defaulted to a shared mutable ``{}``
        (classic mutable-default-argument bug), so mutations leaked
        between entries across tests; default to None and build a fresh
        dict per instance instead.
        """
        kwargs = {
            'entry_id': entry_id or 'mock-id',
            'domain': domain,
            'data': data or {},
            'options': options if options is not None else {},
            'version': version,
            'title': title,
            'connection_class': connection_class,
        }
        # only forward source/state when given so base defaults apply
        if source is not None:
            kwargs['source'] = source
        if state is not None:
            kwargs['state'] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries.append(self)

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries.append(self)
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files."""
    # match using endswith, start search with longest string
    # NOTE(review): sorted(key=len) is ascending, so the SHORTEST suffix is
    # tried first, contradicting the comment above — confirm intended order.
    matchlist = sorted(list(files_dict.keys()), key=len) if endswith else []
    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        # Return the mocked file on full match
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, 'name', fname)
            return res
        # Match using endswith
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, 'name', fname)
                return res
        # Fallback for hass.components (i.e. services.yaml)
        if 'homeassistant/components' in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding='utf-8')
        # Not found
        raise FileNotFoundError("File not found: {}".format(fname))
    return patch.object(yaml, 'open', mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return a coroutine that returns a value or raises an exception."""
    return mock_coro_func(return_value, exception)()


def mock_coro_func(return_value=None, exception=None):
    """Return a coroutine function that returns *return_value* or raises.

    Fix: use a native ``async def`` instead of the deprecated
    ``@asyncio.coroutine`` decorator, which was removed in Python 3.11.
    """
    async def coro(*args, **kwargs):
        """Fake coroutine."""
        if exception:
            raise exception
        return return_value
    return coro
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.
    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time
    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}
    @ha.callback
    def mock_psc(hass, config_input, domain_input):
        """Mock the prepare_setup_component to capture config."""
        res = async_process_component_config(
            hass, config_input, domain_input)
        # record the validated config (None when validation failed)
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug("Configuration for %s, Validated: %s, Original %s",
                      domain_input,
                      config[domain_input],
                      config_input.get(domain_input))
        return res
    assert isinstance(config, dict)
    with patch('homeassistant.config.async_process_component_config',
               mock_psc):
        yield config
    # when no domain is given it can only be inferred from a single capture
    if domain is None:
        assert len(config) == 1, ('assert_setup_component requires DOMAIN: {}'
                                  .format(list(config.keys())))
        domain = list(config.keys())[0]
    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert res_len == count, 'setup_component failed, expected {} got {}: {}' \
        .format(count, res_len, res)
def init_recorder_component(hass, add_config=None):
    """Initialize the recorder."""
    config = dict(add_config) if add_config else {}
    config[recorder.CONF_DB_URL] = 'sqlite://'  # In memory DB
    # skip schema migration; a fresh in-memory DB needs none
    with patch('homeassistant.components.recorder.migration.migrate_schema'):
        assert setup_component(hass, recorder.DOMAIN,
                               {recorder.DOMAIN: config})
    assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()
    # index the given states by entity_id, all stamped with the same time
    data.last_states = {
        state.entity_id: restore_state.StoredState(state, now)
        for state in states}
    _LOGGER.debug('Restore cache: %s', data.last_states)
    # dict keying silently drops duplicates; fail loudly instead
    assert len(data.last_states) == len(states), \
        "Duplicate entity_id? {}".format(states)
    async def get_restore_state_data() -> restore_state.RestoreStateData:
        return data
    # Patch the singleton task in hass.data to return our new RestoreStateData
    hass.data[key] = hass.async_create_task(get_restore_state_data())
class MockDependency:
    """Context manager / decorator that mock-installs a dependency tree."""

    def __init__(self, root, *args):
        """Remember the root module name and its dotted submodule paths."""
        self.root = root
        self.submodules = args

    def __enter__(self):
        """Patch sys.modules with MagicMocks for the whole tree."""
        def resolve(mock, path):
            """Walk an attribute path down a mock."""
            if not path:
                return mock
            return resolve(getattr(mock, path[0]), path[1:])
        base = MagicMock()
        to_mock = {}
        for sub in self.submodules:
            to_mock["{}.{}".format(self.root, sub)] = resolve(base, sub.split('.'))
        to_mock[self.root] = base
        self.patcher = patch.dict('sys.modules', to_mock)
        self.patcher.start()
        return base

    def __exit__(self, *exc):
        """Undo the sys.modules patch; never swallow exceptions."""
        self.patcher.stop()
        return False

    def __call__(self, func):
        """Decorate *func* to run with the mocked tree appended as last arg."""
        def run_mocked(*args, **kwargs):
            """Run with mocked dependencies."""
            with self as base:
                args = list(args) + [base]
                func(*args, **kwargs)
        return run_mocked
class MockEntity(entity.Entity):
    """Mock Entity class."""
    def __init__(self, **values):
        """Initialize an entity."""
        # arbitrary property overrides, looked up by _handle()
        self._values = values
        if 'entity_id' in values:
            self.entity_id = values['entity_id']
    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle('name')
    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle('should_poll')
    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle('unique_id')
    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle('available')
    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle('device_info')
    def _handle(self, attr):
        """Return the overridden value for *attr*, else the base default."""
        if attr in self._values:
            return self._values[attr]
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.
    Data is a dict {'key': {'version': version, 'data': data}}
    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}
    orig_load = storage.Store._async_load
    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None
            mock_data = data.get(store.key)
            if 'data' not in mock_data or 'version' not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')
            store._data = mock_data
        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info('Loading data for %s: %s', store.key, loaded)
        return loaded
    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info('Writing data to %s: %s', store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(
            data_to_write, cls=store._encoder))
    # autospec=True makes the patched methods receive `store` as first arg
    with patch('homeassistant.helpers.storage.Store._async_load',
               side_effect=mock_async_load, autospec=True), \
        patch('homeassistant.helpers.storage.Store._write_data',
              side_effect=mock_write_data, autospec=True):
        yield data
async def flush_store(store):
    """Force any pending delayed write of *store* to be flushed.

    A store with no in-memory data has nothing queued, so it is a no-op.
    """
    if store._data is not None:
        await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system health info reported by *domain*'s registered callback."""
    info_callback = hass.data['system_health']['info'][domain]
    return await info_callback(hass)
def mock_integration(hass, module):
    """Mock an integration."""
    # NOTE(review): 'homeassisant' below looks like a typo for
    # 'homeassistant'; it is a runtime string, so left untouched — confirm
    # whether anything matches against this package path.
    integration = loader.Integration(
        hass, 'homeassisant.components.{}'.format(module.DOMAIN), None,
        loader.manifest_from_legacy_module(module))
    _LOGGER.info("Adding mock integration: %s", module.DOMAIN)
    # register in both the integration cache and the module cache
    hass.data.setdefault(
        loader.DATA_INTEGRATIONS, {}
    )[module.DOMAIN] = integration
    hass.data.setdefault(loader.DATA_KEY, {})[module.DOMAIN] = module
def mock_entity_platform(hass, platform_path, module):
    """Mock a entity platform.
    platform_path is in form light.hue. Will create platform
    hue.light.
    """
    domain, platform_name = platform_path.split('.')
    # NOTE(review): both caches below use loader.DATA_KEY, so they are the
    # same dict; integration_cache possibly should use DATA_INTEGRATIONS —
    # confirm against the loader implementation.
    integration_cache = hass.data.setdefault(loader.DATA_KEY, {})
    module_cache = hass.data.setdefault(loader.DATA_KEY, {})
    if platform_name not in integration_cache:
        mock_integration(hass, MockModule(platform_name))
    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    # key is reversed to '<platform>.<domain>', e.g. 'hue.light'
    module_cache["{}.{}".format(platform_name, domain)] = module
| 31.526824 | 79 | 0.65041 |
74ed273e6f57af9b584de6d0a4908bd201279c46
| 1,659 |
py
|
Python
|
app/tests/test_sentiment.py
|
PsyBorgs/redditanalyser
|
8055995c23e3d8fd83f1c8f8a9f9c43004c5ab94
|
[
"MIT"
] | 1 |
2016-11-10T04:46:51.000Z
|
2016-11-10T04:46:51.000Z
|
app/tests/test_sentiment.py
|
PsyBorgs/redditanalyser
|
8055995c23e3d8fd83f1c8f8a9f9c43004c5ab94
|
[
"MIT"
] | null | null | null |
app/tests/test_sentiment.py
|
PsyBorgs/redditanalyser
|
8055995c23e3d8fd83f1c8f8a9f9c43004c5ab94
|
[
"MIT"
] | null | null | null |
import pytest
from sqlalchemy.orm import joinedload
from app.tests import const
from app.models import (
Submission, Comment, SubmissionSentiment, CommentSentiment)
from app import sentiment
def test_comment_sentiment(session):
    """comment_sentiment returns a dict with the comment id and float scores."""
    Submission.create(session, **const.MOCK_SUBMISSION)
    comment = Comment.create(session, **const.MOCK_COMMENT1)
    result = sentiment.comment_sentiment(comment)
    assert sorted(result.keys()) == sorted(
        ['comment_id', 'polarity', 'subjectivity'])
    assert isinstance(result['polarity'], float)
    assert isinstance(result['subjectivity'], float)
def test_comment_sentiment_avg(session):
    """comment_sentiment_avg aggregates the stored per-comment sentiments."""
    submission = Submission.create(session, **const.MOCK_SUBMISSION)
    for mock_comment in (const.MOCK_COMMENT1, const.MOCK_COMMENT2):
        comment = Comment.create(session, **mock_comment)
        CommentSentiment.create(
            session, **sentiment.comment_sentiment(comment))
    comments = session.query(Comment).\
        options(joinedload('sentiment')).\
        all()
    comment_sentiments = [
        {
            "polarity": c.sentiment.polarity,
            "subjectivity": c.sentiment.subjectivity,
        }
        for c in comments
    ]
    csa = sentiment.comment_sentiment_avg(comment_sentiments)
    csa.update({'submission_id': submission.id})
    assert sorted(csa.keys()) == sorted(
        ['submission_id', 'polarity', 'subjectivity'])
    assert isinstance(csa['polarity'], float)
    assert isinstance(csa['subjectivity'], float)
| 31.301887 | 65 | 0.710669 |
b7f0cf7db39ee900ba2285d871076da244cb07ff
| 5,063 |
py
|
Python
|
neighborwatch/tests.py
|
iyerikuzwe/ikuzweneighborhood
|
32a3bd707a761881fed538ca0c702fa2e6c3b522
|
[
"Unlicense"
] | null | null | null |
neighborwatch/tests.py
|
iyerikuzwe/ikuzweneighborhood
|
32a3bd707a761881fed538ca0c702fa2e6c3b522
|
[
"Unlicense"
] | null | null | null |
neighborwatch/tests.py
|
iyerikuzwe/ikuzweneighborhood
|
32a3bd707a761881fed538ca0c702fa2e6c3b522
|
[
"Unlicense"
] | null | null | null |
from django.test import TestCase
# Create your tests here.
from .models import Neighbour, Profile, Business, Posts
from django.contrib.auth.models import User
class HoodTestClass(TestCase):
    """Tests for the Neighbour model and its helper methods."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='a')
        self.hood = Neighbour(name='ikuzwe', location='kagugu', user=self.user)

    def test_instance(self):
        """The fixture is a Neighbour instance."""
        self.assertTrue(isinstance(self.hood, Neighbour))

    def test_save_method(self):
        """save_hood() persists the neighbourhood."""
        self.hood.save_hood()
        hoods = Neighbour.objects.all()
        self.assertTrue(len(hoods) > 0)

    def test_delete_method(self):
        """delete_hood() removes a saved neighbourhood."""
        self.hood.save_hood()
        # BUG FIX: the method was referenced but never called (missing parentheses),
        # so the delete path was never exercised.
        self.hood.delete_hood()

    def test_update_method(self):
        """A neighbourhood's name can be updated via the queryset API."""
        self.hood.save_hood()
        Neighbour.objects.filter(name='ikuzwe').update(name='Bias')
        hoods = Neighbour.objects.get(name='Bias')
        # BUG FIX: assertTrue(a, b) treats b as a failure message and never
        # compares the values; assertEqual performs the intended check.
        self.assertEqual(hoods.name, 'Bias')

    def test_get_by_id(self):
        """get_by_id() returns the same hood as a direct queryset lookup."""
        self.hood.save_hood()
        this_hood = self.hood.get_by_id(self.hood.id)
        hood = Neighbour.objects.get(id=self.hood.id)
        # NOTE(review): assumes get_by_id returns a single model instance — confirm.
        self.assertEqual(this_hood, hood)
class ProfileTestClass(TestCase):
    """Tests for the Profile model and its helper methods."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='a')
        self.hood = Neighbour(name='ikuzwe', location='kagugu', user=self.user)
        self.hood.save_hood()
        self.pro = Profile(user=self.user, hood=self.hood)

    def test_instance(self):
        """The fixture is a Profile instance."""
        self.assertTrue(isinstance(self.pro, Profile))

    def test_save_method(self):
        """save_profile() persists the profile."""
        self.pro.save_profile()
        self.assertTrue(len(Profile.objects.all()) > 0)

    def test_delete_method(self):
        """del_profile() removes a saved profile."""
        self.pro.save_profile()
        self.pro.del_profile()
class BusinessTestClass(TestCase):
    """Tests for the Business model and its helper methods."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='a')
        self.hood = Neighbour(name='ikuzwe', location='kagugu', user=self.user)
        self.hood.save_hood()
        self.biz = Business(name="bizna", email="[email protected]", user=self.user, hood=self.hood)

    def test_instance(self):
        """The fixture is a Business instance."""
        self.assertTrue(isinstance(self.biz, Business))

    def test_save_method(self):
        """save_biz() persists the business."""
        self.biz.save_biz()
        bizes = Business.objects.all()
        self.assertTrue(len(bizes) > 0)

    def test_delete_method(self):
        """delete_biz() removes a saved business."""
        self.biz.save_biz()
        self.biz.delete_biz()

    def test_update_method(self):
        """A business name can be updated via the queryset API."""
        self.biz.save_biz()
        Business.objects.filter(name='bizna').update(name='biznas')
        bizes = Business.objects.get(name='biznas')
        # BUG FIX: assertTrue(a, b) treats b as a failure message and never
        # compares the values; assertEqual performs the intended check.
        self.assertEqual(bizes.name, 'biznas')

    def test_get_by_id(self):
        """get_by_bizid() returns the same business as a direct lookup."""
        self.biz.save_biz()
        this_biz = self.biz.get_by_bizid(self.biz.id)
        biz = Business.objects.get(id=self.biz.id)
        # NOTE(review): assumes get_by_bizid returns a single model instance — confirm.
        self.assertEqual(this_biz, biz)
class PostsTestClass(TestCase):
    """Tests for the Posts model and its helper methods."""

    def setUp(self):
        self.user = User.objects.create(id=1, username='a')
        self.hood = Neighbour(name='ikuzwe', location='kagugu', user=self.user)
        self.hood.save_hood()
        self.post = Posts(body="bizna", user=self.user, hood=self.hood)

    def test_instance(self):
        """The fixture is a Posts instance."""
        self.assertTrue(isinstance(self.post, Posts))

    def test_save_method(self):
        """save_posts() persists the post."""
        self.post.save_posts()
        posts = Posts.objects.all()
        self.assertTrue(len(posts) > 0)

    def test_delete_method(self):
        """del_posts() removes a saved post."""
        self.post.save_posts()
        self.post.del_posts()

    def test_update_method(self):
        """A post body can be updated via the queryset API."""
        self.post.save_posts()
        Posts.objects.filter(body='bizna').update(body='biznas')
        bizes = Posts.objects.get(body='biznas')
        # BUG FIX: assertTrue(a, b) treats b as a failure message and never
        # compares the values; assertEqual performs the intended check.
        self.assertEqual(bizes.body, 'biznas')
| 26.78836 | 99 | 0.607742 |
931ad34dfe08dc95206bcb7c2f343419727123cd
| 2,780 |
py
|
Python
|
plugins/elasticsearch/komand_elasticsearch/triggers/poll_documents/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 46 |
2019-06-05T20:47:58.000Z
|
2022-03-29T10:18:01.000Z
|
plugins/elasticsearch/komand_elasticsearch/triggers/poll_documents/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 386 |
2019-06-07T20:20:39.000Z
|
2022-03-30T17:35:01.000Z
|
plugins/elasticsearch/komand_elasticsearch/triggers/poll_documents/schema.py
|
lukaszlaszuk/insightconnect-plugins
|
8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892
|
[
"MIT"
] | 43 |
2019-07-09T14:13:58.000Z
|
2022-03-28T12:04:46.000Z
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
    """Human-readable metadata for the poll_documents trigger (generated code)."""
    DESCRIPTION = "Poll for new documents given a query"
class Input:
    """Input parameter name constants, matching keys of the input schema below."""
    FREQUENCY = "frequency"
    INDEX = "index"
    QUERY = "query"
    ROUTING = "routing"
    TYPE = "type"
class Output:
    """Output field name constants, matching keys of the output schema below."""
    HITS = "hits"
class PollDocumentsInput(insightconnect_plugin_runtime.Input):
    """JSON-schema wrapper for the trigger's input variables.

    GENERATED CODE — the schema literal below is emitted by the Komand SDK
    and must not be hand-edited; regenerate it instead.
    """
    schema = json.loads("""
  {
  "type": "object",
  "title": "Variables",
  "properties": {
    "frequency": {
      "type": "integer",
      "title": "Frequency",
      "description": "Poll frequency in seconds",
      "default": 60,
      "order": 1
    },
    "index": {
      "type": "string",
      "title": "Index",
      "description": "Document Index",
      "order": 2
    },
    "query": {
      "type": "object",
      "title": "Query",
      "description": "JSON Query DSL",
      "order": 5
    },
    "routing": {
      "type": "string",
      "title": "Routing",
      "description": "Optional Shards to Search",
      "order": 4
    },
    "type": {
      "type": "string",
      "title": "Type",
      "description": "Document Type",
      "order": 3
    }
  },
  "required": [
    "index"
  ]
}
  """)
    def __init__(self):
        # Hand the parsed schema to the runtime base class for validation.
        super(self.__class__, self).__init__(self.schema)
class PollDocumentsOutput(insightconnect_plugin_runtime.Output):
    """JSON-schema wrapper for the trigger's output variables.

    GENERATED CODE — the schema literal below is emitted by the Komand SDK
    and must not be hand-edited; regenerate it instead.
    """
    schema = json.loads("""
  {
  "type": "object",
  "title": "Variables",
  "properties": {
    "hits": {
      "type": "array",
      "title": "Hits",
      "description": "New Hits",
      "items": {
        "$ref": "#/definitions/hit"
      },
      "order": 1
    }
  },
  "definitions": {
    "hit": {
      "type": "object",
      "title": "hit",
      "properties": {
        "_id": {
          "type": "string",
          "title": "ID",
          "description": "Document ID",
          "order": 3
        },
        "_index": {
          "type": "string",
          "title": "Index",
          "description": "Document Index",
          "order": 1
        },
        "_score": {
          "type": "number",
          "title": "Score",
          "description": "Relevance Score",
          "order": 5
        },
        "_source": {
          "type": "object",
          "title": "Source",
          "description": "Content of Document",
          "order": 6
        },
        "_type": {
          "type": "string",
          "title": "Type",
          "description": "Document Type",
          "order": 2
        },
        "_version": {
          "type": "integer",
          "title": "Version",
          "description": "Document Version",
          "order": 4
        }
      }
    }
  }
}
  """)
    def __init__(self):
        # Hand the parsed schema to the runtime base class for validation.
        super(self.__class__, self).__init__(self.schema)
| 20.291971 | 64 | 0.472662 |
36a325e9e5af19faa59c25c4b3c237c6b84fcdfe
| 1,052 |
py
|
Python
|
lnd_grpc/config.py
|
willcl-ark/lnd_grpc
|
cf938c51c201f078e8bbe9e19ffc2d038f3abf7f
|
[
"MIT"
] | 36 |
2019-01-27T19:21:19.000Z
|
2022-03-17T00:04:00.000Z
|
lnd_grpc/config.py
|
willcl-ark/lnd_grpc
|
cf938c51c201f078e8bbe9e19ffc2d038f3abf7f
|
[
"MIT"
] | 65 |
2019-02-01T21:09:53.000Z
|
2020-08-26T21:20:26.000Z
|
lnd_grpc/config.py
|
willcl-ark/lnd_grpc
|
cf938c51c201f078e8bbe9e19ffc2d038f3abf7f
|
[
"MIT"
] | 10 |
2019-01-30T20:57:30.000Z
|
2022-01-20T13:20:51.000Z
|
# LND default params
# source: https://github.com/lightningnetwork/lnd/blob/master/config.go
# File/directory layout defaults (config, data, TLS cert/key, macaroons).
defaultConfigFilename = "lnd.conf"
defaultDataDirname = "data"
defaultChainSubDirname = "chain"
defaultGraphSubDirname = "graph"
defaultTLSCertFilename = "tls.cert"
defaultTLSKeyFilename = "tls.key"
defaultAdminMacFilename = "admin.macaroon"
defaultReadMacFilename = "readonly.macaroon"
defaultInvoiceMacFilename = "invoice.macaroon"
# Logging defaults.
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "lnd.log"
# Network, port and chain defaults.
defaultRPCPort = 10009
defaultRESTPort = 8080
defaultPeerPort = 9735
defaultRPCHost = "localhost"
defaultNetwork = "mainnet"
defaultNoSeedBackup = False
# Tor integration defaults.
defaultTorSOCKSPort = 9050
defaultTorDNSHost = "soa.nodes.lightning.directory"
defaultTorDNSPort = 53
defaultTorControlPort = 9051
defaultTorV2PrivateKeyFilename = "v2_onion_private_key"
defaultTorV3PrivateKeyFilename = "v3_onion_private_key"
# lnd_grpc default params
# gRPC channel options: raise max send/receive message sizes to 32 MiB (33554432 bytes).
GRPC_OPTIONS = [
    ("grpc.max_receive_message_length", 33554432),
    ("grpc.max_send_message_length", 33554432),
]
| 30.941176 | 71 | 0.809886 |
18f0d124457ca50d18b37cc0c1c95ea1175f7b05
| 44,611 |
py
|
Python
|
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/custom.py
|
noelbundick/azure-cli
|
b56636230e3fd3162b9b144f969175641dc230a8
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/custom.py
|
noelbundick/azure-cli
|
b56636230e3fd3162b9b144f969175641dc230a8
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-iot/azure/cli/command_modules/iot/custom.py
|
noelbundick/azure-cli
|
b56636230e3fd3162b9b144f969175641dc230a8
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=no-self-use,no-member,line-too-long,too-few-public-methods
from __future__ import print_function
from enum import Enum
from knack.util import CLIError
from azure.cli.core.commands import LongRunningOperation
from azure.mgmt.iothub.models import (IotHubSku,
AccessRights,
IotHubDescription,
IotHubSkuInfo,
SharedAccessSignatureAuthorizationRule,
IotHubProperties,
EventHubProperties,
RoutingEventHubProperties,
RoutingServiceBusQueueEndpointProperties,
RoutingServiceBusTopicEndpointProperties,
RoutingStorageContainerProperties,
RouteProperties,
RoutingMessage,
TestRouteInput,
TestAllRoutesInput)
from azure.mgmt.iothubprovisioningservices.models import (ProvisioningServiceDescription,
IotDpsPropertiesDescription,
IotHubDefinitionDescription,
IotDpsSkuInfo,
IotDpsSku,
SharedAccessSignatureAuthorizationRuleAccessRightsDescription)
from azure.cli.command_modules.iot.mgmt_iot_hub_device.lib.iot_hub_device_client import IotHubDeviceClient
from azure.cli.command_modules.iot.sas_token_auth import SasTokenAuthentication
from azure.cli.command_modules.iot.shared import EndpointType
from ._client_factory import resource_service_factory
from ._utils import open_certificate
# CUSTOM TYPE
class KeyType(Enum):
    """Selects which shared-access key of a policy a command should use."""
    primary = 'primary'
    secondary = 'secondary'
# This is a work around to simplify the permission parameter for access policy creation, and also align with the other
# command modules.
# The original AccessRights enum is a combination of below four basic access rights.
# In order to avoid asking for comma- & space-separated strings from the user, a space-separated list is supported for
# assigning multiple permissions.
# The underlying IoT SDK should handle this. However it isn't right now. Remove this after it is fixed in IoT SDK.
class SimpleAccessRights(Enum):
    """Single-permission mirror of AccessRights, combinable as a space-separated list (see note above)."""
    registry_read = AccessRights.registry_read.value
    registry_write = AccessRights.registry_write.value
    service_connect = AccessRights.service_connect.value
    device_connect = AccessRights.device_connect.value
# CUSTOM METHODS FOR DPS
def iot_dps_list(client, resource_group_name=None):
    """List provisioning services, subscription-wide when no resource group is given."""
    if resource_group_name is not None:
        return client.iot_dps_resource.list_by_resource_group(resource_group_name)
    return client.iot_dps_resource.list_by_subscription()


def iot_dps_get(client, dps_name, resource_group_name=None):
    """Fetch one provisioning service, resolving it by name when no resource group is given."""
    if resource_group_name is not None:
        return client.iot_dps_resource.get(dps_name, resource_group_name)
    return _get_iot_dps_by_name(client, dps_name, resource_group_name)
def iot_dps_create(cmd, client, dps_name, resource_group_name, location=None, sku=IotDpsSku.s1.value, unit=1):
    """Create (or overwrite) a Device Provisioning Service instance.

    Validates the name is available and resolves a default location from the
    resource group when none is supplied.
    """
    _check_dps_name_availability(client.iot_dps_resource, dps_name)
    resolved_location = _ensure_location(cmd.cli_ctx, resource_group_name, location)
    description = ProvisioningServiceDescription(
        location=resolved_location,
        properties=IotDpsPropertiesDescription(),
        sku=IotDpsSkuInfo(name=sku, capacity=unit))
    return client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
def iot_dps_update(client, dps_name, parameters, resource_group_name):
    """Apply an updated ProvisioningServiceDescription to an existing DPS."""
    return client.iot_dps_resource.create_or_update(resource_group_name, dps_name, parameters)


def iot_dps_delete(client, dps_name, resource_group_name):
    """Delete a Device Provisioning Service instance."""
    return client.iot_dps_resource.delete(dps_name, resource_group_name)
# DPS access policy methods
def iot_dps_access_policy_list(client, dps_name, resource_group_name):
    """List shared-access policies of a DPS (the lookup validates the DPS exists)."""
    iot_dps_get(client, dps_name, resource_group_name)
    return client.iot_dps_resource.list_keys(dps_name, resource_group_name)


def iot_dps_access_policy_get(client, dps_name, resource_group_name, access_policy_name):
    """Fetch one shared-access policy by name (the lookup validates the DPS exists)."""
    iot_dps_get(client, dps_name, resource_group_name)
    return client.iot_dps_resource.list_keys_for_key_name(dps_name, access_policy_name, resource_group_name)
def iot_dps_access_policy_create(cmd, client, dps_name, resource_group_name, access_policy_name, rights, primary_key=None, secondary_key=None, no_wait=False):
    """Add a new shared-access policy to a DPS; returns the created policy unless no_wait."""
    policies = list(iot_dps_access_policy_list(client, dps_name, resource_group_name))
    if _is_policy_existed(policies, access_policy_name):
        raise CLIError("Access policy {0} already existed.".format(access_policy_name))
    policies.append(SharedAccessSignatureAuthorizationRuleAccessRightsDescription(
        key_name=access_policy_name,
        rights=_convert_rights_to_access_rights(rights),
        primary_key=primary_key,
        secondary_key=secondary_key))
    dps = iot_dps_get(client, dps_name, resource_group_name)
    updated_properties = IotDpsPropertiesDescription(
        iot_hubs=dps.properties.iot_hubs,
        allocation_policy=dps.properties.allocation_policy,
        authorization_policies=policies)
    description = ProvisioningServiceDescription(location=dps.location, properties=updated_properties, sku=dps.sku)
    operation = client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
    if no_wait:
        return operation
    LongRunningOperation(cmd.cli_ctx)(operation)
    return iot_dps_access_policy_get(client, dps_name, resource_group_name, access_policy_name)
def iot_dps_access_policy_update(cmd, client, dps_name, resource_group_name, access_policy_name, primary_key=None, secondary_key=None, rights=None, no_wait=False):
    """Modify keys and/or rights of an existing DPS shared-access policy."""
    policies = list(iot_dps_access_policy_list(client, dps_name, resource_group_name))
    if not _is_policy_existed(policies, access_policy_name):
        raise CLIError("Access policy {0} doesn't exist.".format(access_policy_name))
    for policy in policies:
        if policy.key_name != access_policy_name:
            continue
        # Only overwrite fields the caller actually supplied.
        if primary_key is not None:
            policy.primary_key = primary_key
        if secondary_key is not None:
            policy.secondary_key = secondary_key
        if rights is not None:
            policy.rights = _convert_rights_to_access_rights(rights)
    dps = iot_dps_get(client, dps_name, resource_group_name)
    updated_properties = IotDpsPropertiesDescription(
        iot_hubs=dps.properties.iot_hubs,
        allocation_policy=dps.properties.allocation_policy,
        authorization_policies=policies)
    description = ProvisioningServiceDescription(location=dps.location, properties=updated_properties, sku=dps.sku)
    operation = client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
    if no_wait:
        return operation
    LongRunningOperation(cmd.cli_ctx)(operation)
    return iot_dps_access_policy_get(client, dps_name, resource_group_name, access_policy_name)
def iot_dps_access_policy_delete(cmd, client, dps_name, resource_group_name, access_policy_name, no_wait=False):
    """Remove a shared-access policy from a DPS by name (case-insensitive match)."""
    policies = list(iot_dps_access_policy_list(client, dps_name, resource_group_name))
    if not _is_policy_existed(policies, access_policy_name):
        raise CLIError("Access policy {0} doesn't existed.".format(access_policy_name))
    remaining = [p for p in policies if p.key_name.lower() != access_policy_name.lower()]
    dps = iot_dps_get(client, dps_name, resource_group_name)
    updated_properties = IotDpsPropertiesDescription(
        iot_hubs=dps.properties.iot_hubs,
        allocation_policy=dps.properties.allocation_policy,
        authorization_policies=remaining)
    description = ProvisioningServiceDescription(location=dps.location, properties=updated_properties, sku=dps.sku)
    operation = client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
    if no_wait:
        return operation
    LongRunningOperation(cmd.cli_ctx)(operation)
    return iot_dps_access_policy_list(client, dps_name, resource_group_name)
# DPS linked hub methods
def iot_dps_linked_hub_list(client, dps_name, resource_group_name):
    """Return the IoT hubs currently linked to a DPS."""
    return iot_dps_get(client, dps_name, resource_group_name).properties.iot_hubs


def iot_dps_linked_hub_get(client, dps_name, resource_group_name, linked_hub):
    """Return one linked hub by name, or raise CLIError if it is not linked."""
    for hub in iot_dps_linked_hub_list(client, dps_name, resource_group_name):
        if hub.name == linked_hub:
            return hub
    raise CLIError("Linked hub '{0}' does not exist. Use 'iot dps linked-hub show to see all linked hubs.".format(linked_hub))
def iot_dps_linked_hub_create(cmd, client, dps_name, resource_group_name, connection_string, location, apply_allocation_policy=None, allocation_weight=None, no_wait=False):
    """Link an IoT hub to a DPS; returns the updated linked-hub list unless no_wait."""
    hubs = list(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
    # Hack due to DPS Swagger/SDK issue
    # In the newer API version the name parameter is required
    # however in the SDK name is read-only/assigned to None
    client.api_version = '2017-11-15'
    hubs.append(IotHubDefinitionDescription(
        connection_string=connection_string,
        location=location,
        apply_allocation_policy=apply_allocation_policy,
        allocation_weight=allocation_weight))
    dps = iot_dps_get(client, dps_name, resource_group_name)
    updated_properties = IotDpsPropertiesDescription(
        iot_hubs=hubs,
        allocation_policy=dps.properties.allocation_policy,
        authorization_policies=dps.properties.authorization_policies)
    description = ProvisioningServiceDescription(location=dps.location, properties=updated_properties, sku=dps.sku)
    operation = client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
    if no_wait:
        return operation
    LongRunningOperation(cmd.cli_ctx)(operation)
    return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
def iot_dps_linked_hub_update(cmd, client, dps_name, resource_group_name, linked_hub, apply_allocation_policy=None, allocation_weight=None, no_wait=False):
    """Change allocation settings of a linked hub; returns the hub unless no_wait.

    Only fields the caller supplies are overwritten. Raises CLIError when the
    named hub is not linked to the DPS.
    """
    dps_linked_hubs = []
    dps_linked_hubs.extend(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
    if not _is_linked_hub_existed(dps_linked_hubs, linked_hub):
        # BUG FIX: the message previously said "Access policy", copied from the
        # access-policy commands; this command operates on linked hubs.
        raise CLIError("Linked hub {0} doesn't exist.".format(linked_hub))
    for hub in dps_linked_hubs:
        if hub.name == linked_hub:
            if apply_allocation_policy is not None:
                hub.apply_allocation_policy = apply_allocation_policy
            if allocation_weight is not None:
                hub.allocation_weight = allocation_weight
    dps = iot_dps_get(client, dps_name, resource_group_name)
    dps_property = IotDpsPropertiesDescription(iot_hubs=dps_linked_hubs,
                                               allocation_policy=dps.properties.allocation_policy,
                                               authorization_policies=dps.properties.authorization_policies)
    dps_description = ProvisioningServiceDescription(location=dps.location, properties=dps_property, sku=dps.sku)
    if no_wait:
        return client.iot_dps_resource.create_or_update(resource_group_name, dps_name, dps_description)
    LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.create_or_update(resource_group_name, dps_name, dps_description))
    return iot_dps_linked_hub_get(client, dps_name, resource_group_name, linked_hub)
def iot_dps_linked_hub_delete(cmd, client, dps_name, resource_group_name, linked_hub, no_wait=False):
    """Unlink an IoT hub from a DPS; returns the remaining linked hubs unless no_wait."""
    hubs = list(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
    if not _is_linked_hub_existed(hubs, linked_hub):
        raise CLIError("Linked hub {0} doesn't existed.".format(linked_hub))
    remaining = [h for h in hubs if h.name.lower() != linked_hub.lower()]
    dps = iot_dps_get(client, dps_name, resource_group_name)
    updated_properties = IotDpsPropertiesDescription(
        iot_hubs=remaining,
        allocation_policy=dps.properties.allocation_policy,
        authorization_policies=dps.properties.authorization_policies)
    description = ProvisioningServiceDescription(location=dps.location, properties=updated_properties, sku=dps.sku)
    operation = client.iot_dps_resource.create_or_update(resource_group_name, dps_name, description)
    if no_wait:
        return operation
    LongRunningOperation(cmd.cli_ctx)(operation)
    return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
# DPS certificate methods
def iot_dps_certificate_list(client, dps_name, resource_group_name):
    """List all certificates uploaded to a DPS."""
    return client.dps_certificate.list(resource_group_name, dps_name)


def iot_dps_certificate_get(client, dps_name, resource_group_name, certificate_name):
    """Fetch one DPS certificate by name."""
    return client.dps_certificate.get(certificate_name, resource_group_name, dps_name)


def iot_dps_certificate_create(client, dps_name, resource_group_name, certificate_name, certificate_path):
    """Upload a new certificate; refuses to overwrite an existing one."""
    existing = client.dps_certificate.list(resource_group_name, dps_name)
    if any(cert.name == certificate_name for cert in existing.value):
        raise CLIError("Certificate '{0}' already exists. Use 'iot dps certificate update'"
                       " to update an existing certificate.".format(certificate_name))
    certificate = open_certificate(certificate_path)
    if not certificate:
        raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
    return client.dps_certificate.create_or_update(resource_group_name, dps_name, certificate_name, None, certificate)


def iot_dps_certificate_update(client, dps_name, resource_group_name, certificate_name, certificate_path, etag):
    """Replace the content of an existing certificate; the name must already exist."""
    existing = client.dps_certificate.list(resource_group_name, dps_name)
    for cert in existing.value:
        if cert.name != certificate_name:
            continue
        certificate = open_certificate(certificate_path)
        if not certificate:
            raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
        return client.dps_certificate.create_or_update(resource_group_name, dps_name, certificate_name, etag, certificate)
    raise CLIError("Certificate '{0}' does not exist. Use 'iot dps certificate create' to create a new certificate."
                   .format(certificate_name))


def iot_dps_certificate_delete(client, dps_name, resource_group_name, certificate_name, etag):
    """Delete a DPS certificate (etag guards against concurrent modification)."""
    return client.dps_certificate.delete(resource_group_name, etag, dps_name, certificate_name)


def iot_dps_certificate_gen_code(client, dps_name, resource_group_name, certificate_name, etag):
    """Generate a proof-of-possession verification code for a certificate."""
    return client.dps_certificate.generate_verification_code(certificate_name, etag, resource_group_name, dps_name)


def iot_dps_certificate_verify(client, dps_name, resource_group_name, certificate_name, certificate_path, etag):
    """Verify possession of a certificate by uploading the signed verification cert."""
    certificate = open_certificate(certificate_path)
    if not certificate:
        raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
    return client.dps_certificate.verify_certificate(certificate_name, etag, resource_group_name, dps_name,
                                                     None, None, None, None, None, None, None, None, certificate)
# CUSTOM METHODS
def iot_hub_certificate_list(client, hub_name, resource_group_name=None):
    """List all certificates uploaded to an IoT hub."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.certificates.list_by_iot_hub(resource_group_name, hub_name)


def iot_hub_certificate_get(client, hub_name, certificate_name, resource_group_name=None):
    """Fetch one IoT hub certificate by name."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.certificates.get(resource_group_name, hub_name, certificate_name)


def iot_hub_certificate_create(client, hub_name, certificate_name, certificate_path, resource_group_name=None):
    """Upload a new certificate; refuses to overwrite an existing one."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    existing = client.certificates.list_by_iot_hub(resource_group_name, hub_name)
    if any(cert.name == certificate_name for cert in existing.value):
        raise CLIError("Certificate '{0}' already exists. Use 'iot hub certificate update'"
                       " to update an existing certificate.".format(certificate_name))
    certificate = open_certificate(certificate_path)
    if not certificate:
        raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
    return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, None, certificate)


def iot_hub_certificate_update(client, hub_name, certificate_name, certificate_path, etag, resource_group_name=None):
    """Replace the content of an existing certificate; the name must already exist."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    for cert in client.certificates.list_by_iot_hub(resource_group_name, hub_name).value:
        if cert.name != certificate_name:
            continue
        certificate = open_certificate(certificate_path)
        if not certificate:
            raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
        return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, etag, certificate)
    raise CLIError("Certificate '{0}' does not exist. Use 'iot hub certificate create' to create a new certificate."
                   .format(certificate_name))


def iot_hub_certificate_delete(client, hub_name, certificate_name, etag, resource_group_name=None):
    """Delete an IoT hub certificate (etag guards against concurrent modification)."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.certificates.delete(resource_group_name, hub_name, certificate_name, etag)


def iot_hub_certificate_gen_code(client, hub_name, certificate_name, etag, resource_group_name=None):
    """Generate a proof-of-possession verification code for a certificate."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.certificates.generate_verification_code(resource_group_name, hub_name, certificate_name, etag)


def iot_hub_certificate_verify(client, hub_name, certificate_name, certificate_path, etag, resource_group_name=None):
    """Verify possession of a certificate by uploading the signed verification cert."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    certificate = open_certificate(certificate_path)
    if not certificate:
        raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
    return client.certificates.verify(resource_group_name, hub_name, certificate_name, etag, certificate)
def iot_hub_create(cmd, client, hub_name, resource_group_name, location=None, sku=IotHubSku.f1.value, unit=1, partition_count=2):
    """Create (or overwrite) an IoT hub with the built-in 'events' endpoint."""
    _check_name_availability(client.iot_hub_resource, hub_name)
    resolved_location = _ensure_location(cmd.cli_ctx, resource_group_name, location)
    event_hub_endpoints = {
        'events': EventHubProperties(retention_time_in_days=1, partition_count=partition_count),
    }
    hub_description = IotHubDescription(
        location=resolved_location,
        sku=IotHubSkuInfo(name=sku, capacity=unit),
        properties=IotHubProperties(event_hub_endpoints=event_hub_endpoints))
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub_description)


def _check_name_availability(iot_hub_resource, hub_name):
    """Raise CLIError when the requested hub name is reported unavailable."""
    availability = iot_hub_resource.check_name_availability(hub_name)
    if availability is not None and not availability.name_available:
        raise CLIError(availability.message)
def iot_hub_get(client, hub_name, resource_group_name=None):
    """Fetch one IoT hub, resolving it by name when no resource group is given."""
    if resource_group_name is not None:
        return client.iot_hub_resource.get(resource_group_name, hub_name)
    return _get_iot_hub_by_name(client, hub_name)


def iot_hub_list(client, resource_group_name=None):
    """List IoT hubs, subscription-wide when no resource group is given."""
    if resource_group_name is not None:
        return client.iot_hub_resource.list_by_resource_group(resource_group_name)
    return client.iot_hub_resource.list_by_subscription()


def iot_hub_update(client, hub_name, parameters, resource_group_name=None):
    """Apply an updated IotHubDescription, using its etag for optimistic concurrency."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, parameters, {'IF-MATCH': parameters.etag})


def iot_hub_delete(client, hub_name, resource_group_name=None):
    """Delete an IoT hub."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.delete(resource_group_name, hub_name)
# pylint: disable=inconsistent-return-statements
def iot_hub_show_connection_string(client, hub_name, resource_group_name=None, policy_name='iothubowner',
                                   key_type=KeyType.primary.value):
    """Build connection string(s): one hub when hub_name is set, otherwise every hub."""
    if hub_name is not None:
        resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
        conn_str = _get_single_hub_connection_string(client, hub_name, resource_group_name, policy_name, key_type)
        return {'connectionString': conn_str}
    hubs = iot_hub_list(client, resource_group_name)
    if hubs is None:
        raise CLIError("No IoT Hub found.")
    return [
        {'name': h.name,
         'connectionString': _get_single_hub_connection_string(
             client, h.name, h.additional_properties['resourcegroup'], policy_name, key_type)}
        for h in hubs
    ]
def _get_single_hub_connection_string(client, hub_name, resource_group_name, policy_name, key_type):
    """Compose 'HostName=...;SharedAccessKeyName=...;SharedAccessKey=...' for one hub."""
    access_policy = iot_hub_policy_get(client, hub_name, policy_name, resource_group_name)
    # Intermediate fix to support domains beyond azure-devices.net
    hostname = _get_iot_hub_by_name(client, hub_name).properties.host_name
    if key_type == KeyType.secondary:
        key = access_policy.secondary_key
    else:
        key = access_policy.primary_key
    return 'HostName={};SharedAccessKeyName={};SharedAccessKey={}'.format(hostname, policy_name, key)
def iot_hub_sku_list(client, hub_name, resource_group_name=None):
    """List SKUs the hub can be scaled to."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_valid_skus(resource_group_name, hub_name)


def iot_hub_consumer_group_create(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
    """Create a consumer group on a built-in Event Hub-compatible endpoint."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.create_event_hub_consumer_group(resource_group_name, hub_name, event_hub_name, consumer_group_name)


def iot_hub_consumer_group_list(client, hub_name, resource_group_name=None, event_hub_name='events'):
    """List consumer groups of a built-in Event Hub-compatible endpoint."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.list_event_hub_consumer_groups(resource_group_name, hub_name, event_hub_name)


def iot_hub_consumer_group_get(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
    """Fetch one consumer group by name."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_event_hub_consumer_group(resource_group_name, hub_name, event_hub_name, consumer_group_name)


def iot_hub_consumer_group_delete(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
    """Delete a consumer group."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.delete_event_hub_consumer_group(resource_group_name, hub_name, event_hub_name, consumer_group_name)


def iot_hub_policy_list(client, hub_name, resource_group_name=None):
    """List shared-access policies of an IoT hub."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.list_keys(resource_group_name, hub_name)


def iot_hub_policy_get(client, hub_name, policy_name, resource_group_name=None):
    """Fetch one shared-access policy by name."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_keys_for_key_name(resource_group_name, hub_name, policy_name)
def iot_hub_policy_create(client, hub_name, policy_name, permissions, resource_group_name=None):
    """Add a shared-access policy with the given permissions to an IoT hub.

    Raises CLIError when a policy with the same key name already exists.
    """
    access_rights = _convert_perms_to_access_rights(permissions)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    hub_rg = hub.additional_properties['resourcegroup']
    policies = list(iot_hub_policy_list(client, hub_name, hub_rg))
    if _is_policy_existed(policies, policy_name):
        raise CLIError("Policy {0} already existed.".format(policy_name))
    policies.append(SharedAccessSignatureAuthorizationRule(key_name=policy_name, rights=access_rights))
    hub.properties.authorization_policies = policies
    return client.iot_hub_resource.create_or_update(hub_rg, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_policy_delete(client, hub_name, policy_name, resource_group_name=None):
    """Remove a shared-access policy from an IoT hub.

    Raises CLIError if no policy with the given key name exists on the hub.
    """
    hub = iot_hub_get(client, hub_name, resource_group_name)
    hub_rg = hub.additional_properties['resourcegroup']
    # Materialize once so the existence scan and the filter below iterate the
    # same data even if list_keys returns a one-shot iterable.
    # Fix: the original deep-copied the policy list (plus a local `import copy`)
    # just to pass it to _is_policy_existed, which only reads key names — the
    # copy was pure overhead.
    policies = list(iot_hub_policy_list(client, hub_name, hub_rg))
    if not _is_policy_existed(policies, policy_name):
        raise CLIError("Policy {0} not found.".format(policy_name))
    updated_policies = [p for p in policies if p.key_name.lower() != policy_name.lower()]
    hub.properties.authorization_policies = updated_policies
    return client.iot_hub_resource.create_or_update(hub_rg, hub_name, hub, {'IF-MATCH': hub.etag})
def _is_policy_existed(policies, policy_name):
policy_set = set([p.key_name.lower() for p in policies])
return policy_name.lower() in policy_set
def iot_hub_job_list(client, hub_name, resource_group_name=None):
    """List jobs on the given IoT hub."""
    rg_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.list_jobs(rg_name, hub_name)
def iot_hub_job_get(client, hub_name, job_id, resource_group_name=None):
    """Get a single job on the given IoT hub by its id."""
    rg_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_job(rg_name, hub_name, job_id)
def iot_hub_job_cancel(client, hub_name, job_id, resource_group_name=None):
    """Cancel a job on the IoT hub via the device-facing client."""
    hub_device_client = _get_device_client(client, resource_group_name, hub_name, '')
    return hub_device_client.cancel_job(job_id)
def iot_hub_get_quota_metrics(client, hub_name, resource_group_name=None):
    """Return the quota metrics for the given IoT hub."""
    rg_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_quota_metrics(rg_name, hub_name)
def iot_hub_get_stats(client, hub_name, resource_group_name=None):
    """Return the registry statistics for the given IoT hub."""
    rg_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return client.iot_hub_resource.get_stats(rg_name, hub_name)
def iot_hub_routing_endpoint_create(client, hub_name, endpoint_name, endpoint_type,
                                    endpoint_resource_group, endpoint_subscription_id,
                                    connection_string, container_name=None,
                                    resource_group_name=None):
    """Add a routing endpoint (event hub, service bus queue/topic, or storage
    container) to the hub and push the updated hub definition.

    Raises CLIError for a storage-container endpoint without a container name.
    """
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    endpoints = hub.properties.routing.endpoints
    etype = endpoint_type.lower()
    # Fields shared by every endpoint flavor.
    common = {
        'connection_string': connection_string,
        'name': endpoint_name,
        'subscription_id': endpoint_subscription_id,
        'resource_group': endpoint_resource_group
    }
    if etype == EndpointType.EventHub.value:
        endpoints.event_hubs.append(RoutingEventHubProperties(**common))
    elif etype == EndpointType.ServiceBusQueue.value:
        endpoints.service_bus_queues.append(RoutingServiceBusQueueEndpointProperties(**common))
    elif etype == EndpointType.ServiceBusTopic.value:
        endpoints.service_bus_topics.append(RoutingServiceBusTopicEndpointProperties(**common))
    elif etype == EndpointType.AzureStorageContainer.value:
        if not container_name:
            raise CLIError("Container name is required.")
        endpoints.storage_containers.append(
            RoutingStorageContainerProperties(container_name=container_name, **common))
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_routing_endpoint_list(client, hub_name, endpoint_type=None, resource_group_name=None):
    """List the hub's routing endpoints, optionally filtered by endpoint type."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    endpoints = iot_hub_get(client, hub_name, resource_group_name).properties.routing.endpoints
    if not endpoint_type:
        return endpoints
    etype = endpoint_type.lower()
    if etype == EndpointType.EventHub.value:
        return endpoints.event_hubs
    if etype == EndpointType.ServiceBusQueue.value:
        return endpoints.service_bus_queues
    if etype == EndpointType.ServiceBusTopic.value:
        return endpoints.service_bus_topics
    if etype == EndpointType.AzureStorageContainer.value:
        return endpoints.storage_containers
def iot_hub_routing_endpoint_show(client, hub_name, endpoint_name, resource_group_name=None):
    """Return the routing endpoint matching endpoint_name (case-insensitive).

    Searches event hubs, then service bus queues, topics, and storage
    containers; raises CLIError when no endpoint matches.
    """
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    endpoints = iot_hub_get(client, hub_name, resource_group_name).properties.routing.endpoints
    wanted = endpoint_name.lower()
    for group in (endpoints.event_hubs, endpoints.service_bus_queues,
                  endpoints.service_bus_topics, endpoints.storage_containers):
        for candidate in group:
            if candidate.name.lower() == wanted:
                return candidate
    raise CLIError("No endpoint found.")
def iot_hub_routing_endpoint_delete(client, hub_name, endpoint_name=None, endpoint_type=None, resource_group_name=None):
    """Delete routing endpoints by name and/or type (all when neither is given)."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    routing = hub.properties.routing
    routing.endpoints = _delete_routing_endpoints(endpoint_name, endpoint_type, routing.endpoints)
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_create(client, hub_name, route_name, source_type, endpoint_name, enabled=None, condition=None,
                         resource_group_name=None):
    """Append a new route to the hub's routing table and push the update.

    Defaults: condition 'true', enabled True when the caller passes None.
    """
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    new_route = RouteProperties(
        source=source_type,
        name=route_name,
        endpoint_names=endpoint_name.split(),
        condition=condition if condition is not None else 'true',
        is_enabled=enabled if enabled is not None else True
    )
    hub.properties.routing.routes.append(new_route)
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_list(client, hub_name, source_type=None, resource_group_name=None):
    """List the hub's routes, optionally filtered by source type (case-insensitive)."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    routes = iot_hub_get(client, hub_name, resource_group_name).properties.routing.routes
    if not source_type:
        return routes
    wanted = source_type.lower()
    return [r for r in routes if r.source.lower() == wanted]
def iot_hub_route_show(client, hub_name, route_name, resource_group_name=None):
    """Return the route with the given name (case-insensitive); raise CLIError if absent."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    wanted = route_name.lower()
    match = next((r for r in hub.properties.routing.routes if r.name.lower() == wanted), None)
    if match is None:
        raise CLIError("No route found.")
    return match
def iot_hub_route_delete(client, hub_name, route_name=None, source_type=None, resource_group_name=None):
    """Delete routes by name and/or source type; with no filter, clear them all."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    routing = hub.properties.routing
    if not route_name and not source_type:
        routing.routes = []
    if route_name:
        unwanted_name = route_name.lower()
        routing.routes = [r for r in routing.routes if r.name.lower() != unwanted_name]
    if source_type:
        unwanted_source = source_type.lower()
        routing.routes = [r for r in routing.routes if r.source.lower() != unwanted_source]
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_update(client, hub_name, route_name, source_type=None, endpoint_name=None, enabled=None,
                         condition=None, resource_group_name=None):
    """Update fields of an existing route; unspecified (None) fields are untouched.

    Raises CLIError when the named route does not exist.
    """
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    hub = iot_hub_get(client, hub_name, resource_group_name)
    route = next((r for r in hub.properties.routing.routes
                  if r.name.lower() == route_name.lower()), None)
    if route is None:
        raise CLIError("No route found.")
    if source_type is not None:
        route.source = source_type
    if endpoint_name is not None:
        route.endpoint_names = endpoint_name.split()
    if condition is not None:
        route.condition = condition
    if enabled is not None:
        route.is_enabled = enabled
    return client.iot_hub_resource.create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_test(client, hub_name, route_name=None, source_type=None, body=None, app_properties=None,
                       system_properties=None, resource_group_name=None):
    """Test a single named route, or all routes for a source type, with a sample message."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    message = RoutingMessage(
        body=body,
        app_properties=app_properties,
        system_properties=system_properties
    )
    if route_name:
        target_route = iot_hub_route_show(client, hub_name, route_name, resource_group_name)
        payload = TestRouteInput(message=message, twin=None, route=target_route)
        return client.iot_hub_resource.test_route(payload, hub_name, resource_group_name)
    payload = TestAllRoutesInput(routing_source=source_type, message=message, twin=None)
    return client.iot_hub_resource.test_all_routes(payload, hub_name, resource_group_name)
def iot_hub_devicestream_show(client, hub_name, resource_group_name=None):
    """Return the device-streams properties of the given IoT hub."""
    rg_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    return iot_hub_get(client, hub_name, rg_name).properties.device_streams
def _get_device_client(client, resource_group_name, hub_name, device_id):
    """Build an IotHubDeviceClient authenticated with the hub's iothubowner SAS policy."""
    resource_group_name = _ensure_resource_group_name(client, resource_group_name, hub_name)
    # Intermediate fix to support domains beyond azure-devices.net: resolve the
    # hub's real host name instead of assuming the default suffix.
    hub_host = _get_iot_hub_by_name(client, hub_name).properties.host_name
    resource_uri = '{0}/devices/{1}'.format(hub_host, device_id)
    policy = iot_hub_policy_get(client, hub_name, 'iothubowner', resource_group_name)
    auth = SasTokenAuthentication(resource_uri, policy.key_name, policy.primary_key)
    device_client = IotHubDeviceClient(auth, client.iot_hub_resource.config.subscription_id,
                                       base_url='https://' + hub_host)
    return device_client.iot_hub_devices
def _get_iot_hub_by_name(client, hub_name):
    """Find a hub by name (case-insensitive) in the subscription or raise CLIError."""
    hubs = iot_hub_list(client)
    if hubs is None:
        raise CLIError("No IoT Hub found in current subscription.")
    wanted = hub_name.lower()
    for hub in hubs:
        if hub.name.lower() == wanted:
            return hub
    raise CLIError("No IoT Hub found with name {} in current subscription.".format(hub_name))
def _ensure_location(cli_ctx, resource_group_name, location):
    """Default the location to the resource group's location when not given."""
    if location is not None:
        return location
    groups = resource_service_factory(cli_ctx).resource_groups
    return groups.get(resource_group_name).location
def _ensure_resource_group_name(client, resource_group_name, hub_name):
    """Resolve the hub's resource group when the caller did not supply one."""
    if resource_group_name is not None:
        return resource_group_name
    return _get_iot_hub_by_name(client, hub_name).additional_properties['resourcegroup']
def _convert_perms_to_access_rights(perm_list):
    """Map a list of CLI permission strings onto the IoT SDK AccessRights enum.

    Raises KeyError for a combination the SDK does not define.
    """
    # De-duplicate and sort so the composite lookup key is order-independent.
    perm_key = '_'.join(sorted(set(perm_list)))
    access_rights_mapping = {
        'registryread': AccessRights.registry_read,
        'registrywrite': AccessRights.registry_write,
        'serviceconnect': AccessRights.service_connect,
        'deviceconnect': AccessRights.device_connect,
        'registryread_registrywrite': AccessRights.registry_read_registry_write,
        'registryread_serviceconnect': AccessRights.registry_read_service_connect,
        'deviceconnect_registryread': AccessRights.registry_read_device_connect,
        'registrywrite_serviceconnect': AccessRights.registry_write_service_connect,
        'deviceconnect_registrywrite': AccessRights.registry_write_device_connect,
        'deviceconnect_serviceconnect': AccessRights.service_connect_device_connect,
        'registryread_registrywrite_serviceconnect': AccessRights.registry_read_registry_write_service_connect,
        'deviceconnect_registryread_registrywrite': AccessRights.registry_read_registry_write_device_connect,
        'deviceconnect_registryread_serviceconnect': AccessRights.registry_read_service_connect_device_connect,
        'deviceconnect_registrywrite_serviceconnect': AccessRights.registry_write_service_connect_device_connect,
        'deviceconnect_registryread_registrywrite_serviceconnect': AccessRights.registry_read_registry_write_service_connect_device_connect
    }
    return access_rights_mapping[perm_key]
def _is_linked_hub_existed(hubs, hub_name):
hub_set = set([h.name.lower() for h in hubs])
return hub_name.lower() in hub_set
def _get_iot_dps_by_name(client, dps_name, resource_group=None):
    """Find a provisioning service by name (case-insensitive) or raise CLIError."""
    all_dps = iot_dps_list(client, resource_group)
    if all_dps is None:
        raise CLIError("No DPS found in current subscription.")
    wanted = dps_name.lower()
    for dps in all_dps:
        if dps.name.lower() == wanted:
            return dps
    raise CLIError("No DPS found with name {} in current subscription.".format(dps_name))
def _check_dps_name_availability(iot_dps_resource, dps_name):
    """Raise CLIError (with the service's message) when the DPS name is not available."""
    availability = iot_dps_resource.check_provisioning_service_name_availability(dps_name)
    if availability is not None and not availability.name_available:
        raise CLIError(availability.message)
def _convert_rights_to_access_rights(right_list):
right_set = set(right_list) # remove duplicate
return ",".join(list(right_set))
def _delete_routing_endpoints(endpoint_name, endpoint_type, endpoints):
if endpoint_type:
if EndpointType.ServiceBusQueue.value == endpoint_type.lower():
endpoints.service_bus_queues = []
elif EndpointType.ServiceBusTopic.value == endpoint_type.lower():
endpoints.service_bus_topics = []
elif EndpointType.AzureStorageContainer.value == endpoint_type.lower():
endpoints.storage_containers = []
elif EndpointType.EventHub.value == endpoint_type.lower():
endpoints.event_hubs = []
if endpoint_name:
if any(e.name.lower() == endpoint_name.lower() for e in endpoints.service_bus_queues):
sbq_endpoints = [e for e in endpoints.service_bus_queues if e.name.lower() != endpoint_name.lower()]
endpoints.service_bus_queues = sbq_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.service_bus_topics):
sbt_endpoints = [e for e in endpoints.service_bus_topics if e.name.lower() != endpoint_name.lower()]
endpoints.service_bus_topics = sbt_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.storage_containers):
sc_endpoints = [e for e in endpoints.storage_containers if e.name.lower() != endpoint_name.lower()]
endpoints.storage_containers = sc_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.event_hubs):
eh_endpoints = [e for e in endpoints.event_hubs if e.name.lower() != endpoint_name.lower()]
endpoints.event_hubs = eh_endpoints
if not endpoint_type and not endpoint_name:
endpoints.service_bus_queues = []
endpoints.service_bus_topics = []
endpoints.storage_containers = []
endpoints.event_hubs = []
return endpoints
| 54.27129 | 172 | 0.743471 |
155750ed1326772453a0566eacb2c4f88cee3683
| 593,467 |
py
|
Python
|
login.py
|
iwan002/apa-ja-yg-penting-jalan
|
c5edc7d62746abda56f9c8086fcd18166b9a8ff0
|
[
"Apache-2.0"
] | null | null | null |
login.py
|
iwan002/apa-ja-yg-penting-jalan
|
c5edc7d62746abda56f9c8086fcd18166b9a8ff0
|
[
"Apache-2.0"
] | null | null | null |
login.py
|
iwan002/apa-ja-yg-penting-jalan
|
c5edc7d62746abda56f9c8086fcd18166b9a8ff0
|
[
"Apache-2.0"
] | null | null | null |
from linepy import *
from akad.ttypes import Message
from akad.ttypes import ContentType as Type
from akad.ttypes import ChatRoomAnnouncementContents
from akad.ttypes import ChatRoomAnnouncement
from thrift import transport, protocol, server
from datetime import datetime, timedelta
import pytz, pafy, livejson, time, asyncio, random, multiprocessing, timeit, sys, json, ctypes, tweepy, codecs, threading, glob, re, ast, six, os, subprocess, wikipedia, atexit, goslate, urllib, urllib.parse, urllib3, string, tempfile, shutil, unicodedata
from humanfriendly import format_timespan, format_size, format_number, format_length
import html5lib
import requests,json,urllib3
from random import randint
from bs4 import BeautifulSoup
from gtts import gTTS
from googletrans import Translator
import youtube_dl
from time import sleep
import pyimgflip
from zalgo_text import zalgo
from threading import Thread,Event
import requests,uvloop
import wikipedia as wiki
from BEAPI import *  # Fix: `From` was capitalized, a SyntaxError that prevented the whole script from loading.
requests.packages.urllib3.disable_warnings()
loop = uvloop.new_event_loop()
from Naked.toolshed.shell import execute_js
try:
import urllib.request as urllib2
except ImportError:
import urllib2
cl = LINE('[email protected]','sayang1')
cl.log("Auth Token : " + str(cl.authToken))
oepoll = OEPoll(cl)
call = cl
creator = ["u00e287effe898e54347d2ee6502d2ec2"]
owner = ["u00e287effe898e54347d2ee6502d2ec2"]
admin = ["u00e287effe898e54347d2ee6502d2ec2"]
staff = ["u00e287effe898e54347d2ee6502d2ec2"]
Tumbal = ["u8e603fce8dd01a68eeb8837342618f6d","u00e287effe898e54347d2ee6502d2ec2"]
lineProfile = cl.getProfile()
mid = cl.getProfile().mid
KAC = [cl]
Bots = [mid]
Saints = admin + owner + staff
Team = creator + owner + admin + staff + Bots
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
Setbot4 = codecs.open("user.json","r","utf-8")
settings = livejson.File('settings.json', True, False, 4)
premium = json.load(Setbot4)
protectantijs = []
protectcancel = []
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
welcome = []
targets = []
protectname = []
prohibitedWords = ['Asu', 'Jancuk', 'Tai', 'Kickall', 'Ratakan', 'Cleanse']
userTemp = {}
userKicked = []
msg_dict = {}
msg_dict1 = {}
dt_to_str = {}
temp_flood = {}
groupName = {}
groupImage = {}
list = []
ban_list = []
offbot = []
settings = {
"welcome": False,
"leave": False,
"mid": False,
"size": "micro",
"keyCommand": "dent",
"commentPost": "hadir like ʙʏ me: 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"Aip": True,
"replySticker": False,
"jumbosticker": False,
"sticker": False,
"media": False,
"nCall": False,
"checkContact": False,
"postEndUrl": {},
"postingan":{},
"checkPost": False,
"autoRead": True,
"autoJoinTicket": True,
"setKey": False,
"restartPoint": False,
"checkSticker": False,
"userMentioned": False,
"messageSticker": False,
"changeGroupPicture": [],
"keyCommand": "",
"AddstickerTag": {
"sid": "",
"spkg": "",
"status": False
},
"Addsticker":{
"name": "",
"status":False
},
"stk":{},
"selfbot":True,
"Images":{},
"Img":{},
"Addimage":{
"name": "",
"status":False
},
"Videos":{},
"Addaudio":{
"name": "",
"status":False
},
"Addvideo":{
"name": "",
"status":False
},
"myProfile": {
"displayName": "",
"coverId": "",
"pictureStatus": "",
"statusMessage": ""
},
"mimic": {
"copy": False,
"status": False,
"target": {}
},
"unsendMessage": False,
"Picture":False,
"group":{},
"groupPicture":False,
"changevp": False,
"changeCover":False,
"changePicture":False,
"changeProfileVideo": False,
"ChangeVideoProfilevid":{},
"ChangeVideoProfilePicture":{},
"displayName": "",
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.200.32.99 Safari/537.36"
]
}
wait = {
"limit": 1,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"checkmid": False,
"getMid": False,
"invite":False,
"Invi":False,
"staff":{},
"Timeline": True,
"likePost": False,
"likeOn": False,
"addstaff":False,
"dellstaff":False,
"bots":{},
"readPoint":{},
"readMember":{},
"lang":False,
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist": False,
"dblacklist": False,
"wwhitelist": False,
"dwhitelist": False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":True,
"tumbal":True,
"key":True,
"smule":True,
"jumbosticker":False,
"media": True,
"smule":True,
"notifsmule":True,
"smule":True,
"notif":True,
"nCall":False,
"backup":True,
"contact":False,
"autoRead": True,
"autoBlock": True,
"autoJoin":True,
"autoAdd":False,
'autoCancel':{"on":True, "members":1},
"autoReject":False,
"autoLeave":False,
"detectMention":False,
"detectMention2":False,
"detectMention3": False,
"detectMention4": False,
"detectMention5": False,
"Mentionkick":False,
"welcomeOn":False,
"Unsend":False,
"sticker":False,
"smule":True,
"selfbot":True,
"dell":"cok",
"flexghost":" 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"JANJUK":"kowe",
"NGENTOT":"memek",
"broad":"aku datang bawa pesan",
"mention":" 👿anyink ga tuh",
"Respontag":" OPO BRO",
"Respontag2":"ᴋᴀɴɢᴇɴ ʏᴀ ᴛᴀɢ ᴀɪᴍ ᴍᴜʟᴜ",
"Respontag3":"hadir bro",
"Respontag4":"naon anyink ngetag aing wae 😠",
"welcome":" sᴇᴍᴏɢᴀ ʙᴇᴛᴀʜ ɢᴀᴇᴢ",
"autoLeave":" sᴇʟᴀᴍᴀᴛ ᴊᴀʟᴀɴ ᴛᴇᴍᴀɴ",
"comment":"salam kenal guyss\n│http://line.me/ti/p/~waentur01",
"message1":"ᴛʜᴀɴᴋᴢ ғᴏʀᴅ ᴀᴅᴅ ᴍᴇ\n│http://line.me/ti/p/~waentur01",
}
protect = {
"pqr":[],
"pinv":[],
"proall":[],
"antijs":[],
"protect":[]
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
try:
with open("Log_data.json","r",encoding="utf_8_sig") as f:
msg_dict = json.loads(f.read())
except:
print("Couldn't read Log data")
clProfile = cl.getProfile()
myProfile["displayName"] = clProfile.displayName
myProfile["statusMessage"] = clProfile.statusMessage
myProfile["pictureStatus"] = clProfile.pictureStatus
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
imagesOpen = codecs.open("image.json","r","utf-8")
images = json.load(imagesOpen)
videosOpen = codecs.open("video.json","r","utf-8")
videos = json.load(videosOpen)
stickersOpen = codecs.open("sticker.json","r","utf-8")
stickers = json.load(stickersOpen)
audiosOpen = codecs.open("audio.json","r","utf-8")
audios = json.load(audiosOpen)
mulai = time.time()
msg_dict = {}
msg_dict1 = {}
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def cTime_to_datetime(unixtime):
    """Convert a LINE millisecond timestamp to a local datetime (drops the last 3 digits)."""
    seconds = int(str(unixtime)[:-3])
    return datetime.fromtimestamp(seconds)
def autolike():
    """Scan up to 500 timeline posts and like + comment on them.

    NOTE(review): the guard likes posts whose 'liked' flag is already True —
    this looks inverted (you would expect to act on not-yet-liked posts);
    confirm intent before flipping it.
    """
    for zx in range(0, 500):
        hasil = cl.activity(limit=500)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == True:
            try:
                cl.like(hasil['result']['posts'][zx]['userInfo']['mid'], hasil['result']['posts'][zx]['postInfo']['postId'], likeType=1001)
                # Fix: the post id key is 'postId' (camelCase), as used by cl.like
                # above; the original read 'postid', which raised KeyError and was
                # silently swallowed, so the comment was never posted.
                cl.comment(hasil['result']['posts'][zx]['userInfo']['mid'], hasil['result']['posts'][zx]['postInfo']['postId'], wait["comment"])
                print ("✪[]► Like Success")
            except:
                # Deliberate best-effort: keep scanning even if one post fails.
                pass
        else:
            print ("Already Liked")
def dt_to_str(dt):
    """Format a datetime as 'HH:MM:SS'."""
    return '{:%H:%M:%S}'.format(dt)
#delete log if pass more than 24 hours
def delete_log():
    """Drop cached message entries older than 24 hours.

    Fixes vs. original: `timedelta` is used directly (the original called
    `datetime.timedelta`, an AttributeError under `from datetime import
    datetime`); the correct loop key is deleted (the original deleted an
    undefined `msg_id`); and iteration runs over a snapshot of the keys so the
    dict is not mutated while being iterated.
    """
    now = datetime.utcnow()
    for msg_id in list(msg_dict):
        if (now - cTime_to_datetime(msg_dict[msg_id]["createdTime"])) > timedelta(1):
            del msg_dict[msg_id]
def delete_log():
    """Delete expired (>24h) log entries and their downloaded files.

    Fix: iterate over a snapshot of the keys; the original deleted entries
    from msg_dict while iterating it, which raises RuntimeError in Python 3.
    """
    for data in list(msg_dict):
        entry = msg_dict[data]
        if (datetime.utcnow() - cTime_to_datetime(entry["createdTime"])) > timedelta(1):
            if "path" in entry:
                # Remove the file the bot downloaded for this message.
                cl.deleteFile(entry["path"])
            del msg_dict[data]
def logError(text):
    """Log an error to the bot console and append a timestamped line to logError.txt."""
    cl.log("[ ERROR ] {}".format(str(text)))
    tz = pytz.timezone("Asia/Jakarta")
    inihari = datetime.now(tz=tz)
    day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
    hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
    bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
    # Indonesian weekday name (assumes an English locale for %A, as the original did).
    hasil = hari[day.index(inihari.strftime('%A'))]
    # Fix: the original compared the zero-padded month string ('01'..'09') to
    # str(k) ('1'..'9'), so months before October never matched and the month
    # name stayed a number. Index the month list directly instead.
    bln = bulan[int(inihari.strftime('%m')) - 1]
    # Renamed from `time` to avoid shadowing the imported time module.
    stamp = "{}, {} - {} - {} | {}".format(str(hasil), str(inihari.strftime('%d')), str(bln), str(inihari.strftime('%Y')), str(inihari.strftime('%H:%M:%S')))
    with open("logError.txt", "a") as error:
        error.write("\n[ {} ] {}".format(str(stamp), text))
def download_page(url):
    """Fetch a URL with a browser User-Agent and return its body.

    Returns str on Python 3 (errors are printed and None returned), bytes on
    Python 2 (errors return the literal "Page Not found").
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:
        # Fix: the original wrote `import urllib,request` / `urllib,request.Request(...)`,
        # which tries to import a nonexistent `request` module and evaluates a
        # (module, attribute) tuple — every call failed. Use urllib.request.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
def _images_get_all_items(page):
    """Collect successive items via _images_get_next_item until it reports 'no_links'."""
    collected = []
    while True:
        item, end_content = _images_get_next_item(page)
        if item == "no_links":
            break
        collected.append(item)
        time.sleep(0.1)  # throttle between extractions
        page = page[end_content:]
    return collected
def downloadImageWithURL (mid):
    """Clone another user's LINE profile onto this account.

    NOTE(review): despite the name, this does not just download an image —
    it duplicates the whole profile (same logic as cloneProfile in this file).
    """
    contact = cl.getContact(mid)
    if contact.videoProfile == None:
        # No video profile: the library can clone name/status/picture directly.
        cl.cloneContactProfile(mid)
    else:
        # Video profile: copy the text fields, then fetch picture + video
        # variant from the LINE CDN and re-upload them as our own.
        profile = cl.getProfile()
        profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
        cl.updateProfile(profile)
        pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
        vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
        changeVideoAndPictureProfile(pict, vids)
    # Copy the cover photo by object id as well.
    coverId = cl.getProfileDetail(mid)['result']['objectId']
    cl.updateProfileCoverById(coverId)
def restartBot():
    """Re-exec the current interpreter with the same argv (restarts the bot)."""
    executable = sys.executable
    os.execl(executable, executable, *sys.argv)
def waktu(secs):
    """Format a duration in seconds as 'DD Hari HH Jam MM Menit SS Detik'."""
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def runtime(secs):
    """Format an uptime in seconds as 'DD Hari HH Jam MM Menit SS Detik' (same as waktu)."""
    minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, minutes, seconds)
def sendImage(to, path, name="image"):
    """Send an image by URL when running on a VPS; failures are logged, not raised."""
    try:
        # NOTE(review): the settings dict initialised in this file has no
        # "server" key, so this raises KeyError into the handler below —
        # confirm where "server" is supposed to be set.
        if settings["server"] == "VPS":
            cl.sendImageWithURL(to, str(path))
    except Exception as exc:
        logError(exc)
# remote tag-all: mention every mid in BotsOleng in one numbered message
def RemotOlengKiller(to, BotsOleng):
    """Send a single message to `to` that @-mentions every mid in BotsOleng.

    Builds the LINE MENTION payload by tracking the byte offsets (S/E) of each
    "@x" placeholder inside the growing message text.
    """
    try:
        AbiOleng = ""
        MuhazirOlengKiller = "Total {} Janda \n1.".format(str(len(BotsOleng)))
        Desah = []  # list of {'S','E','M'} mention descriptors
        TapokPipit = 1  # loop counter, later reused as a throwaway string
        JilatMpek = 2  # next visible line number
        for Sedot in BotsOleng:
            MuhazirOleng = "@x\n"
            # Start/end offsets of the placeholder within the message so far.
            Wikwik = str(len(MuhazirOlengKiller))
            Ngentot = str(len(MuhazirOlengKiller) + len(MuhazirOleng) - 1)
            AbiOleng = {'S':Wikwik, 'E':Ngentot, 'M':Sedot}
            Desah.append(AbiOleng)
            MuhazirOlengKiller += MuhazirOleng
            if TapokPipit < len(BotsOleng):
                TapokPipit += 1
                MuhazirOlengKiller += "%i. " % (JilatMpek)
                JilatMpek=(JilatMpek+1)
            else:
                try:
                    # NOTE(review): AbiOlengKiller is not defined anywhere in
                    # this file, so this always raises NameError and falls
                    # through to the "Success" branch — confirm intent.
                    TapokPipit = "\n[ {} ]".format(str(AbiOlengKiller.getGroup(to).name))
                except:
                    TapokPipit = "\n[ Success ]"
        cl.sendMessage(to, MuhazirOlengKiller, {'MENTION': str('{"MENTIONEES":' + json.dumps(Desah) + '}')}, 0)
    except Exception as error:
        logError(error)
def changeProfileVideo(to):
    """Upload the video+picture pair stored in settings['changevp'] as the profile video.

    NOTE(review): in this file settings['changevp'] is initialised to the bool
    False, so the subscripts below raise TypeError unless other code replaces
    it with a dict first — confirm the setter flow.
    """
    # NOTE(review): these guards look inverted — a truthy/True value triggers
    # the "not found" reply; presumably a falsy/None check was intended.
    if settings['changevp']['picture'] == True:
        return cl.sendMessage(to, "Foto tidak ditemukan")
    elif settings['changevp']['video'] == True:
        return cl.sendMessage(to, "Video tidak ditemukan")
    else:
        path = settings['changevp']['video']
        files = {'file': open(path, 'rb')}
        obs_params = cl.genOBSParams({'oid': cl.getProfile().mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4'})
        data = {'params': obs_params}
        # Upload the video to the LINE object store; 201 means created.
        r_vp = cl.server.postContent('{}/talk/vp/upload.nhn'.format(str(cl.server.LINE_OBS_DOMAIN)), data=data, files=files)
        if r_vp.status_code != 201:
            return cl.sendMessage(to, "Gagal update profile")
        path_p = settings['changevp']['picture']
        settings['changevp']['status'] = True
        cl.updateProfilePicture(path_p, 'vp')
def changeVideoAndPictureProfile(pict, vids):
    """Upload a profile video plus its still picture; return a status string.

    Re-raises any underlying error wrapped in a generic Exception.
    """
    try:
        files = {'file': open(vids, 'rb')}
        obs_params = cl.genOBSParams({'oid': mid, 'ver': '2.0', 'type': 'video', 'cat': 'vp.mp4', 'name': 'GEGE.mp4'})
        data = {'params': obs_params}
        upload_url = '{}/talk/vp/upload.nhn'.format(str(cl.server.LINE_OBS_DOMAIN))
        r_vp = cl.server.postContent(upload_url, data=data, files=files)
        if r_vp.status_code != 201:
            return "Failed update profile"
        cl.updateProfilePicture(pict, 'vp')
        return "Success update profile"
    except Exception as e:
        raise Exception("Error change video and picture profile %s"%str(e))
def cloneProfile(mid):
    """Copy the profile (name, status, picture/video, cover) of user `mid` onto this account."""
    contact = cl.getContact(mid)
    if contact.videoProfile == None:
        # No video profile: the library clones name/status/picture directly.
        cl.cloneContactProfile(mid)
    else:
        # Video profile: copy the text fields, then fetch picture + video
        # variant from the LINE CDN and re-upload them as our own.
        profile = cl.getProfile()
        profile.displayName, profile.statusMessage = contact.displayName, contact.statusMessage
        cl.updateProfile(profile)
        pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + contact.pictureStatus, saveAs="tmp/pict.bin")
        vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + contact.pictureStatus + '/vp', saveAs="tmp/video.bin")
        changeVideoAndPictureProfile(pict, vids)
    # Copy the cover photo by object id as well.
    coverId = cl.getProfileDetail(mid)['result']['objectId']
    cl.updateProfileCoverById(coverId)
def restoreProfile():
    """Restore this account's profile from the snapshot in settings['myProfile']."""
    profile = cl.getProfile()
    profile.displayName = settings['myProfile']['displayName']
    profile.statusMessage = settings['myProfile']['statusMessage']
    # NOTE(review): the settings defaults in this file define 'myProfile'
    # without a 'videoProfile' key, so this lookup raises KeyError unless a
    # backup flow adds it first — confirm.
    if settings['myProfile']['videoProfile'] == None:
        profile.pictureStatus = settings['myProfile']['pictureStatus']
        cl.updateProfileAttribute(8, profile.pictureStatus)
        cl.updateProfile(profile)
    else:
        cl.updateProfile(profile)
        # Re-download the saved picture and its video variant from the CDN,
        # then upload them back as the profile video.
        pict = cl.downloadFileURL('http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'], saveAs="tmp/pict.bin")
        vids = cl.downloadFileURL( 'http://dl.profile.line-cdn.net/' + settings['myProfile']['pictureStatus'] + '/vp', saveAs="tmp/video.bin")
        changeVideoAndPictureProfile(pict, vids)
    coverId = settings['myProfile']['coverId']
    cl.updateProfileCoverById(coverId)
def mentionMembers(to, mid):
    """Send one message to *to* that @-mentions every member id in *mid*."""
    try:
        ginfo = cl.getGroup(to)
        textx = "╔═════[ Sider Members ]═══════\n║ᴋᴀɴɢ ᴍᴀʟɪɴɢ sᴇᴍᴠᴋ ɴɢɪɴᴛɪᴘ\n╠☛ 1. "
        mentionees = []
        total = len(mid)
        for pos, uid in enumerate(mid, start=1):
            placeholder = "@x\n"
            start = len(textx)
            end = start + len(placeholder) - 1
            # The MENTION metadata wants string offsets into the text.
            mentionees.append({'S': str(start), 'E': str(end), 'M': uid})
            textx += placeholder
            if pos < total:
                textx += "╠☛ {}. ".format(str(pos + 1))
            else:
                textx += "╚══════════════════\n╔══════════════════\n 「 ᴛᴏᴛᴀʟ ᴍᴇᴍʙᴇʀ : {} 」\n╚══════════════════".format(str(total))
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(mentionees) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
    """Build the "sider" (read-but-silent) mention text for members in *mid*.

    NOTE(review): the final sendMessage is commented out in the original,
    so this currently only builds the text (and reports errors).
    """
    try:
        arrData = ""
        textx = (str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                # fix: was `num=(num)` (a no-op), so every numbered line
                # rendered "2." — siblings welcomeMembers/leaveMembers
                # increment the counter here.
                num = (num + 1)
            else:
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        # cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
    """Build the welcome mention text for the member ids in *mid*.

    NOTE(review): the final sendMessage is commented out in the original,
    so this currently only builds the text (and reports errors).
    """
    try:
        textx = " "
        mentionees = []
        total = len(mid)
        counter = 2
        pos = 1
        for uid in mid:
            # Kept from the original: the group is re-fetched per member.
            ginfo = cl.getGroup(to)
            placeholder = "@x\n"
            start = len(textx)
            end = start + len(placeholder) - 1
            mentionees.append({'S': str(start), 'E': str(end), 'M': uid})
            textx += placeholder
            if pos < total:
                pos += 1
                textx += "%i " % (counter)
                counter = counter + 1
            else:
                try:
                    pos = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    pos = "\n╚══[ Success ]"
        # cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(mentionees) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def leaveMembers(to, mid):
    """Build the leave-notification mention text for the members in *mid*.

    NOTE(review): the final sendMessage is commented out in the original,
    so this currently only builds the text (and reports errors).
    """
    try:
        arrData = ""
        textx = "".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            ginfo = cl.getGroup(to)
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num = (num + 1)
            else:
                try:
                    # fix: referenced the undefined name `aditmadzs`
                    # (NameError, silently swallowed by the bare except so
                    # the group name never appeared) — every sibling helper
                    # uses the `cl` client here.
                    no = "\n┗━━[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n┗━━[ Success ]"
        # cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage):
    """Mention *mid* once after *firstmessage*, append bot status info, and send."""
    try:
        text = "%s " % (str(firstmessage))
        placeholder = "@x \n"
        begin = len(text)
        finish = begin + len(placeholder) - 1
        arr = [{'S': str(begin), 'E': str(finish), 'M': mid}]
        # Days remaining until the (hard-coded) expiry date.
        hari = str(datetime(2018, 3, 1) - datetime.today())
        hari = hari[:hari.find(",")]
        teman = cl.getAllContactIds()
        gid = cl.getGroupIdsJoined()
        timeNow = datetime.now(tz=pytz.timezone("Asia/Jakarta"))
        bot = runtime(time.time() - mulai)
        text += placeholder + "jam : " + datetime.strftime(timeNow, '%H:%M:%S') + " wib\nNama Group : " + str(len(gid)) + "\nTeman : " + str(len(teman)) + "\nExpired : In " + hari + "\n Version :「Gaje Bots」 \nTanggal : " + datetime.strftime(timeNow, '%Y-%m-%d') + "\nRuntime : \n • " + bot
        sendTextTemplate12(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        sendTextTemplate12(to, "[ INFO ] Error :\n" + str(error))
def sendMention1(to, mid, firstmessage, lastmessage):
    """Send "*firstmessage* @mention *lastmessage*" as a single mention message."""
    try:
        body = "%s " % (str(firstmessage))
        tag = "@x "
        begin = len(body)
        finish = begin + len(tag) - 1
        mentionees = [{'S': str(begin), 'E': str(finish), 'M': mid}]
        body += tag + str(lastmessage)
        sendTextTemplate12(to, body, {'MENTION': str('{"MENTIONEES":' + json.dumps(mentionees) + '}')}, 0)
    except Exception as error:
        sendTextTemplate12(to, "[ INFO ] Error :\n" + str(error))
def sendTemplates(to, data):
    """POST *data* to the LINE v3 share endpoint and return the HTTP response.

    NOTE(review): the `to` parameter is unused — the target is implied by
    the payload.
    """
    url = "https://api.line.me/message/v3/share"
    # SECURITY(review): a bearer token is hard-coded below; it belongs in
    # configuration or an environment variable, not committed source.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; Redmi Note 5 Build/OPM1.171019.011; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/67.0.3396.87 Mobile Safari/537.36 Line/8.1.1',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer eyJhbGciOiJIUzI1NiJ9.5uMcEEHahauPb5_MKAArvGzEP8dFOeVQeaMEUSjtlvMV9uuGpj827IGArKqVJhiGJy4vs8lkkseiNd-3lqST14THW-SlwGkIRZOrruV4genyXbiEEqZHfoztZbi5kTp9NFf2cxSxPt8YBUW1udeqKu2uRCApqJKzQFfYu3cveyk.GoRKUnfzfj7P2uAX9vYQf9WzVZi8MFcmJk8uFrLtTqU',
    }
    sendPost = requests.post(url, data=json.dumps(data), headers=headers)
    print(sendPost)
    return sendPost
def sendTextTemplate12(to, text, contentMetadata=None, contentType=0):
    """Send *text* as a plain text message branded with the bot's sentBy label.

    fix: several callers (e.g. sendMention at L19471, sendMention1) pass
    four positional arguments, which the old two-parameter signature
    rejected with a TypeError; the extra (contentMetadata, contentType)
    parameters are accepted here for call-site compatibility.  The
    "sentBy" text template cannot carry MENTION metadata, so the extras
    are deliberately unused.
    """
    data = {
        "type": "text",
        "text":text,
        "sentBy": {
            "label":"♻️𝖉𝖚𝖉𝖚𝖑 𝖇𝖔𝖙𝖘♻️",
            "iconUrl": "https://i.ibb.co/h9nLycK/1617387582638.gif",
            "linkUrl": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2"
        }
    }
    cl.postTemplate(to, data)
#=====DEF HELP MENU =======
def sendTextTemplate23(to, text):
    """Send *text* inside the decorative green "template versi 4" flex bubble.

    The bubble shows the bot's own avatar/name, today's date, *text*, and a
    row of shortcut icons (YouTube, LINE profile, camera, Smule, camera
    roll, timeline).

    NOTE(review): this reads the module-level globals `mid` (bot account
    id) and `timeNow` — `timeNow` is a NameError unless it has been
    assigned at module level before this is called; confirm against the
    callers.
    NOTE(review): the "body" dict repeats the keys "type" and "layout" at
    its end (later values win in a Python literal); kept as-is.
    """
    data = {
        "type": "flex",
        "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
        "contents":
        {
            "type": "bubble",
            "size": "micro",
            "body": {
                "backgroundColor": "#000000",
                "type": "box",
                "layout": "vertical",
                "contents": [
                    {
                        "contents": [
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            },
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            },
                            {
                                "contents": [
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    },
                                    {
                                        "contents": [
                                            {
                                                "type": "image",
                                                "url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
                                                "size": "xxs"
                                            },{
                                                "type": "text",
                                                "text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                                "weight": "bold",
                                                "color": "#2bff44",
                                                "size": "xxs",
                                                "flex": 0
                                            },{
                                                "type": "text",
                                                "text": "ᴛᴇᴍᴘʟᴀᴛᴇ",
                                                "weight": "bold",
                                                "color": "#2bff44",
                                                "size": "xxs",
                                                "flex": 0
                                            },{
                                                "type": "text",
                                                "text": "ᴠᴇʀsɪ⁴",
                                                "weight": "bold",
                                                "color": "#2bff44",
                                                "size": "xxs",
                                                "flex": 0
                                            },
                                            {
                                                "type": "image",
                                                "url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
                                                "size": "xxs"
                                            }
                                        ],
                                        "type": "box",
                                        "spacing": "xs",
                                        "layout": "horizontal"
                                    },
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    }
                                ],
                                "type": "box",
                                "layout": "horizontal"
                            },
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            },
                            {
                                "contents": [
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    },
                                    {
                                        "type": "image",
                                        # Bot's own avatar (global `mid`).
                                        "url": "https://obs.line-scdn.net/{}".format(cl.getContact(mid).pictureStatus),
                                        "size": "xxs",
                                        "aspectMode": "cover",
                                        "action": {
                                            "type": "uri",
                                            "uri": "http://line.me/ti/p/~sawargibots",
                                        },
                                        "flex": 0
                                    },
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    },
                                    {
                                        "contents": [
                                            {
                                                "type": "text",
                                                "text": "🚹{}".format(cl.getContact(mid).displayName),
                                                "weight": "bold",
                                                "color": "#2bff44",
                                                #"align": "center",
                                                "size": "xxs",
                                                "flex": 0
                                            },{
                                                "type": "separator",
                                                "color": "#2bff44"
                                            },{
                                                "type": "text",
                                                # NOTE(review): `timeNow` is a module-level global here.
                                                "text": "📆 "+ datetime.strftime(timeNow,'%Y-%m-%d'),
                                                "weight": "bold",
                                                "color": "#2bff44",
                                                #"align": "center",
                                                "size": "xxs",
                                                "flex": 0
                                            }
                                        ],
                                        "type": "box",
                                        "spacing": "xs",
                                        "layout": "vertical"
                                    },
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    }
                                ],
                                "type": "box",
                                "spacing": "xs",
                                "layout": "horizontal"
                            },
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            },
                            {
                                "contents": [
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    },
                                    {
                                        "contents": [
                                            {
                                                # The caller-supplied message body.
                                                "text": text,
                                                "size": "xxs",
                                                # "align": "center",
                                                "color": "#2bff44",
                                                "wrap": True,
                                                "weight": "bold",
                                                "type": "text"
                                            }
                                        ],
                                        "type": "box",
                                        "spacing": "xs",
                                        "layout": "vertical"
                                    },
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    }
                                ],
                                "type": "box",
                                "layout": "horizontal"
                            },
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            },
                            {
                                "contents": [
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    },
                                    {
                                        "type": "image",
                                        "url": "https://i.ibb.co/XWQd8rj/20190625-201419.png",
                                        "size": "xl",
                                        "action": {
                                            "type": "uri",
                                            "uri": "https://www.youtube.com/channel/UCHjL4ZK41GPortOBOIE8zdA?view_as=subscriber"
                                        },
                                        "flex": 1
                                    },
                                    {
                                        "type": "image",
                                        "url": "https://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
                                        "size": "xl",
                                        "action": {
                                            "type": "uri",
                                            "uri": "http://line.me/ti/p/~sawargibots",
                                        },
                                        "flex": 1
                                    },
                                    {
                                        "type": "image",
                                        "url": "https://i.ibb.co/kSMSnWn/20190427-191235.png", #camerahttps://i.ibb.co/hVWDsp8/20190428-232907.png", #smulehttps://i.ibb.co/8YfQVtr/20190427-185626.png", #callinghttps://kepriprov.go.id/assets/img/icon/phone.png", #phone
                                        "size": "xl",
                                        "action": {
                                            "type": "uri",
                                            "uri": "line://nv/camera/"
                                        },
                                        "flex": 1
                                    },
                                    {
                                        "type": "image",
                                        "url": "https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
                                        "size": "xl",
                                        "action": {
                                            "type": "uri",
                                            "uri": "Https://smule.com/XDL_IWAN_021_GSV",
                                        },
                                        "flex": 1
                                    },
                                    {
                                        "type": "image",
                                        "url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
                                        "size": "xl",
                                        "action": {
                                            "type": "uri",
                                            "uri": "line://nv/cameraRoll/multi"
                                        },
                                        "flex": 1
                                    },
                                    {
                                        "contents": [
                                            {
                                                "type": "image",
                                                "url": "https://i.ibb.co/1sGhJdC/20190428-232658.png",
                                                "size": "xl",
                                                "action": {
                                                    "type": "uri",
                                                    "uri": "line://nv/timeline"
                                                },
                                                "flex": 1
                                            }
                                        ],
                                        "type": "box",
                                        "spacing": "xs",
                                        "layout": "vertical"
                                    },
                                    {
                                        "type": "separator",
                                        "color": "#2bff44"
                                    }
                                ],
                                "type": "box",
                                "layout": "horizontal"
                            },
                            {
                                "type": "separator",
                                "color": "#2bff44"
                            }
                        ],
                        "type": "box",
                        "layout": "vertical"
                    }
                ],
                "type": "box",
                "spacing": "xs",
                "layout": "vertical"
            }
        }
    }
    cl.postTemplate(to, data)
def sendTextTemplate906(to, text):
    """Send *text* wrapped in a red-bordered micro flex bubble with a footer label."""
    # Row holding the caller-supplied message text.
    message_row = {
        "type": "box",
        "layout": "horizontal",
        "contents": [
            {
                "type": "text",
                "text": text,
                "size": "xs",
                "color": "#ffffff",
                "wrap": True,
                "weight": "regular",
                "offsetStart": "3px"
            }
        ],
        "margin": "xs",
        "spacing": "md",
        "backgroundColor": "#000000"
    }
    # Centered branding footer.
    footer_row = {
        "type": "box",
        "layout": "vertical",
        "contents": [
            {
                "type": "text",
                "text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                "align": "center",
                "color": "#ffffff",
                "size": "xs"
            }
        ],
        "paddingAll": "2px",
        "backgroundColor": "#000000",
        "margin": "xs"
    }
    bubble = {
        "type": "bubble",
        "size": "micro",
        "body": {
            "type": "box",
            "layout": "vertical",
            "contents": [message_row, footer_row],
            "paddingAll": "0px",
            "borderWidth": "2px",
            "borderColor": "#FF0000",
            "cornerRadius": "10px",
            "spacing": "xs"
        },
        "styles": {
            "body": {
                "backgroundColor": "#ff0000"
            }
        }
    }
    data = {
        "type": "flex",
        "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
        "contents": bubble
    }
    cl.postTemplate(to, data)
#=========DEF
def command(text):
    """Normalize *text* to a lowercase command by stripping the leading key.

    Returns the literal string "command" when *text* does not start with
    the configured command key (so it never matches a real command name).
    """
    pesan = text.lower()
    key = Setmain['keyCommand']
    if pesan.startswith(key):
        # fix: str.replace removed the key EVERYWHERE in the message, not
        # just the leading prefix (e.g. key "!" turned "!say hi!" into
        # "say hi"); slice off only the prefix.
        cmd = pesan[len(key):]
    else:
        cmd = "command"
    return cmd
def help():
    """Return the top-level help-menu text using the configured command key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "ʜᴇʟᴘ1", "ʜᴇʟᴘ2", "ʜᴇʟᴘ3", "ʜᴇʟᴘ4", "ʜᴇʟᴘ5", "ʜᴇʟᴘ6",
        "ʜᴇʟᴘ wl", "ʜᴇʟᴘ bl", "help js", "menu ",
        "menu respon", "menu sticker", "menu sticker1",
    ]
    lines = ["╔═════════🚫"]
    lines += ["┣😱► " + key + entry for entry in entries]
    lines.append("╚═════════🚫")
    return "\n".join(lines)
def helpcreator():
    """Return the creator/self command menu text using the configured key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "ᴍᴇ", "ᴄᴠᴘ", "sᴇᴛᴛɪɴɢ", "ʀᴜɴᴛɪᴍᴇ", "sᴘᴇᴇᴅ-sᴘ",
        "tagal/halo/tag", "ʙʏᴇ", "ʀj", "ʟᴠᴀʟʟ", "ʟɪsᴛғʀɪᴇɴᴅ",
        "ғʀɪᴇɴᴅʟɪsᴛ", "ɢʀᴜᴘʟɪsᴛ", "ᴏᴘᴇɴ ǫʀ", "ᴄʟᴏsᴇ ǫʀ",
        "Set tag: [texs]", "Set tag2: [texs]", "Rtag: [Nogc]",
        "Jepit @", "ʙʟᴏᴄᴋ「@」", "ᴀᴅᴅᴍᴇ「@」", "ᴍʏʙᴏᴛ",
        "ʟɪsᴛᴘᴇɴᴅɪɴɢ", "ʙʟᴏᴄᴋᴄᴏɴᴛᴀᴄᴛ", "ʟᴋsᴛʙʟᴏᴄᴋ", "ʟɪsᴛᴍɪᴅ",
        "ᴀᴅᴅᴀsɪs", "ʙʀᴏᴀᴅᴄᴀsᴛ:「ᴛᴇxᴛ」",
    ]
    lines = ["╭「🤕ᴄᴏᴍᴀɴᴅ ʙᴏᴛ🤕」"]
    lines += ["│🤕" + key + entry for entry in entries]
    lines.append("╰「🤧sᴇʟғ ᴘʏᴛʜᴏɴ³🤧」")
    return "\n".join(lines)
def helpadmin():
    """Return the admin command menu text using the configured command key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "ʙᴏᴛᴀᴅᴅ「@」", "ʙᴏᴛᴅᴇʟʟ「@」", "sᴛᴀғғ「@」", "sᴛᴀғᴅᴇʟʟ「@」",
        "ᴀᴅᴍɪɴ「@」", "ᴀᴅᴍɪɴᴅᴇʟʟ「@」", "#ʀᴇʙᴏᴏᴛ", "ʙᴀɴ「@」", "ʙʟᴄ",
        "ʙᴀɴ:ᴏɴ", "ᴜɴʙᴀɴ:oɴ", "ᴜɴʙᴀɴ「@」", "ʙᴀɴʟɪsᴛ", "ᴄʙᴀɴ",
        "ʀᴇғʀᴇsʜ",
    ]
    lines = ["╭「🤢ᴄᴏᴍᴀɴᴅ ʙᴏᴛ🤢」"]
    lines += ["│🤮" + key + entry for entry in entries]
    lines.append("╰「🤢sᴇʟғ ᴘʏᴛʜᴏɴ³🤢」")
    return "\n".join(lines)
def helpgroup():
    """Return the group/profile command menu text using the configured key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "ɢᴍɪᴅ @", "ɢᴇᴛ ɪᴅ @", "ɢᴇᴛᴍɪᴅ @", "ɢᴇᴛʙɪᴏ @", "ɢᴇᴛɪɴғᴏ @",
        "ɢᴇᴛᴘʀᴏғɪʟᴇ @", "ɢᴇᴛᴘɪᴄᴛᴜʀᴇ @", "ɪɴғᴏ @", "ᴋᴇᴘᴏ @",
        "ᴘᴘᴠɪᴅᴇᴏ @", "ᴋᴏɴᴛᴀᴋ @", "ᴄᴏɴᴛᴀᴄᴛ:「ᴍɪᴅ」", "ɢɴᴀᴍᴇ「ᴛᴇxᴛ」",
        "ᴍʏᴍɪᴅ", "ᴍʏʙɪᴏ", "ᴍʏғᴏᴛᴏ", "ᴍʏɴᴀᴍᴇ", "ᴍʏᴘʀᴏғɪʟᴇ",
        "ᴍʏᴘɪᴄᴛᴜʀᴇ", "ᴍʏᴄᴏᴠᴇʀ", "ᴍʏᴠɪᴅᴇᴏ", "ᴋᴀʟᴇɴᴅᴇʀ", "ᴍᴇᴍᴘɪᴄᴛ",
        "ᴜᴘᴅᴀᴛᴇɢʀᴜᴘ", "ɢʀᴜᴘᴘɪᴄᴛ", "ɪɴғᴏɢʀᴏᴜᴘ「ɴᴏ」", "ɪɴғᴏᴍᴇᴍ「ɴᴏ」",
    ]
    lines = ["╭「👽sᴇʟғ ᴘʏᴛʜᴏɴ³👽」"]
    lines += ["│😠" + key + entry for entry in entries]
    lines.append("╰「👽sᴇʟғ ᴘʏᴛʜᴏɴ³👽」")
    return "\n".join(lines)
def helpsetting():
    """Return the settings/toggles command menu text using the configured key."""
    key = Setmain["keyCommand"].title()
    # NOTE: some entries deliberately keep a trailing space (as originally sent).
    entries = [
        "ᴄᴇᴋ sɪᴅᴇʀ", "ᴄᴇᴋ ʟᴇᴀᴠᴇ ", "ᴄᴇᴋ ᴘᴇsᴀɴ ", "ᴄᴇᴋ ʀᴇsᴘᴏɴ ",
        "ᴄᴇᴋ ʀᴇsᴘᴏɴ² ", "sᴇᴛ sɪᴅᴇʀ:「ᴛᴇxᴛ」", "sᴇᴛ ᴘᴇsᴀɴ:「ᴛᴇxᴛ」",
        "sᴇᴛ ʀᴇsᴘᴏɴ:「ᴛᴇxᴛ」", "sᴇᴛ ʀᴇsᴘᴏɴ²:「ᴛᴇxᴛ」",
        "sᴇᴛ ᴡᴇʟᴄᴏᴍᴇ:「ᴛᴇxᴛ」", "sᴇᴛ ʟᴇᴀᴠᴇ:「ᴛᴇxᴛ」", "ʟɪᴋᴇ「ᴏɴ/ᴏғғ」",
        "sider「ᴏɴ/ᴏғғ」", "stag:「jumlah」", "stag「@kontak」",
        "call:「jumlah」", "scall ", "scallto/yank", "ᴘᴏsᴛ「oɴ/oғғ」",
        "sᴛɪᴄᴋᴇʀ「oɴ/oғғ」", "ɪɴᴠɪᴛᴇ「oɴ/ᴏғғ」", "ᴜɴsᴇɴᴅ「oɴ/oғғ」",
        "ʀᴇsᴘᴏɴ「oɴ/oғғ」", "ʀᴇsᴘᴏɴ²「oɴ/oғғ」", "ᴀᴜᴛᴏᴀᴅᴅ「oɴ/oғғ」",
        "ᴡᴇʟᴄᴏᴍᴇ「oɴ/oғғ」", "ᴄᴏɴᴛᴀᴄᴛ「oɴ/oғғ」", "ᴀᴜᴛᴏᴊᴏɪɴ「oɴ/oғғ」",
        "ᴀᴜᴛᴏʀᴇᴊᴇᴄᴛ「oɴ/oғғ」", "ᴀᴜᴛᴏʟᴇᴀᴠᴇ「oɴ/oғғ」",
        "ᴀᴜᴛᴏʙʟᴏᴄᴋ「oɴ/oғғ」", "ᴊᴏɪɴᴛɪᴄᴋᴇᴛ「oɴ/oғғ」",
    ]
    lines = ["╭「👽💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍👽」"]
    lines += ["│😡" + key + entry for entry in entries]
    lines.append("╰「😡sᴇʟғ ᴘʏᴛʜᴏɴ³😡」")
    return "\n".join(lines)
def media():
    """Return the media command menu text using the configured command key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "Addsticker", "Addmp3", "Addaudio", "Addimg", "Dellsticker",
        "Dellaudio", "Dellmp3", "Dellvideo", "Dellimg", "Liststicker",
        "Listimage", "Listvideo", "Listaudio", "Listmp3", "Lihat「No」",
        "Cctv metro", "Smule「id」", "Joox「text」", "mp4「text」",
        "mp3「text」", "Yutube「text」", "Youtube「text」",
    ]
    lines = ["╔═══════════"]
    lines += ["┣😈► " + key + entry for entry in entries]
    lines.append("╚═══════════")
    return "\n".join(lines)
def helpghost():
    """Return the ghost/js command menu text using the configured command key."""
    key = Setmain["keyCommand"].title()
    entries = [
        "sᴛᴀʏ", "ᴊs ɪɴ-ᴏᴜᴛ", "ᴀᴊsғᴏᴛᴏ", "aing ᴀʙsᴇɴ", "ᴊs ᴄᴇʟ",
        "ᴊs ᴋᴀʟʟ", "ᴘᴀs", "ᴊsɴᴀᴍᴇ [ᴛᴇxᴛ]", "ᴄᴇᴋʙᴏᴛ", "ᴋɪᴄᴋ「@」",
        "ᴄʀᴏᴛ-ᴄʀɪᴛ",
    ]
    lines = ["╭「🐆ᴄᴏᴍᴀɴᴅ ʙᴏᴛ🐆」"]
    lines += ["│🐃" + key + entry for entry in entries]
    lines.append("╰「🐆𝖉𝖚𝖉𝖚𝖑 𝖇𝖔𝖙𝖘 𝖙𝖊𝖆𝖒🐆」")
    return "\n".join(lines)
def helpwl():
    """Return the whitelist command menu text using the configured key.

    fix: the original concatenated `key` AND a literal "{key}" placeholder
    that was never formatted (plus a stray "ᴊ" before clearwl), so every
    line rendered like "Key{key}whitelist"; the placeholder is removed.
    """
    key = Setmain["keyCommand"]
    key = key.title()
    helpMessage7 = "╭「🐆ᴄᴏᴍᴀɴᴅ ʙᴏᴛ🐆」\n"+\
        "│🐃" + key + "whitelist\n" + \
        "│🐃" + key + "clearwl\n" + \
        "│🐃" + key + "detectwl\n" + \
        "│🐃" + key + "addwl「 Mention 」\n" + \
        "│🐃" + key + "dewl「 Mention 」\n" + \
        "│🐃" + key + "wl:「 On/Off 」\n" + \
        "│🐃" + key + "unwl「 Num 」\n" + \
        "╰「🐆𝖉𝖚𝖉𝖚𝖑 𝖇𝖔𝖙𝖘 𝖙𝖊𝖆𝖒🐆」"
    return helpMessage7
def helpbl():
    """Return the blacklist command menu text using the configured key.

    fix: the original concatenated `key` AND a literal "{key}" placeholder
    that was never formatted, so every line rendered like
    "Key{key}blacklist"; the placeholder is removed.
    """
    key = Setmain["keyCommand"]
    key = key.title()
    helpMessage8 = "╭「🐆ᴄᴏᴍᴀɴᴅ ʙᴏᴛ🐆」\n"+\
        "│🐃" + key + "blacklist\n" + \
        "│🐃" + key + "clearbl\n" + \
        "│🐃" + key + "detectbl\n" + \
        "│🐃" + key + "addbl「 Mention 」\n" + \
        "│🐃" + key + "debl「 Mention 」\n" + \
        "│🐃" + key + "bl:「 On/Off 」\n" + \
        "│🐃" + key + "unbl「 Num 」\n" + \
        "╰「🐆𝖉𝖚𝖉𝖚𝖑 𝖇𝖔𝖙𝖘 𝖙𝖊𝖆𝖒🐆」"
    return helpMessage8
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoBlock"] == True:
cl.blockContact(op.param1)
sendTextTemplate12(op.param1,"maaf auto block on\n│http://line.me/ti/p/~behboaedan021")
if op.type == 13 or op.type == 124:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
sendTextTemplate12(op.param1,"eмooн coĸ" +str(ginfo.name))
cl.leavegroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
sendTextTemplate12(op.param1,"ᴛʜᴀɴᴋs" + str(ginfo.name))
if op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
if msg.contentType == 6:
if wait["notif"] == True:
if msg._from not in Bots:
try:
contact = cl.getContact(sender)
group = cl.getGroup(msg.to)
cover = cl.getProfileCoverURL(sender)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if msg.toType == 2:
b = msg.contentMetadata['GC_EVT_TYPE']
c = msg.contentMetadata["GC_MEDIA_TYPE"]
if c == 'AUDIO' and b == "S":
arg = "• nah kan jones nangkring FCG🤣"
arg += "\n• ᴛʏᴘᴇ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
sendTextTemplate12(msg.to,arg)
if c == 'VIDEO' and b == "S":
arg = "• cie ngajakin VCG ikut dong🤣"
arg += "\n• ᴛʏᴘᴇ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}.@!".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
sendTextTemplate12(msg.to,arg)
if c == 'LIVE' and b == "S":
arg = "• waseekk ada atis LIVE 👍"
arg += "\n• ᴛʏᴘᴇ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
sendTextTemplate12(msg.to,arg)
else:
mills = int(msg.contentMetadata["DURATION"])
seconds = (mills/1000)%60
if c == "AUDIO" and b == "E":
arg = "• nah ko udahan sih FCG nya"
arg += "\n• ᴅɪᴀᴋʜɪʀɪ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
arg += "\n• ᴅʀ: {}".format(seconds)
sendTextTemplate12(msg.to,arg)
if c == "VIDEO" and b == "E":
arg = "• baru jga mau ikut VCG malah turun"
arg += "\n• ᴅɪᴀᴋʜɪʀɪ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
arg += "\n• ᴅʀ: {}".format(seconds)
sendTextTemplate12(msg.to,arg)
if c == "LIVE" and b == "E":
arg = "• ko udahan sil LIVE nya "
arg += "\n• ᴅɪᴀᴋʜɪʀɪ {} ᴄᴀʟʟ".format(c)
arg += "\n• ɴᴍ: {}.".format(str(contact.displayName))
arg += "\n• ɢᴄ: {}".format(str(group.name))
arg += "\n• ʜʀ: {}".format(timeNow.strftime('%A'))
arg += "\n• ᴊᴍ: {}".format(datetime.strftime(timeNow,'%H:%M:%S'))
arg += "\n• ᴛɢ: {}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
arg += "\n• ᴅʀ: {}".format(seconds)
sendTextTemplate12(msg.to,arg)
except Exception as error:
print (error)
if op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
if msg.contentType == 6:
if wait["nCall"] == True:
if msg._from not in Bots:
try:
contact = cl.getContact(sender)
group = cl.getGroup(msg.to)
cover = cl.getProfileCoverURL(sender)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if msg.toType == 2:
b = msg.contentMetadata['GC_EVT_TYPE']
c = msg.contentMetadata["GC_MEDIA_TYPE"]
if c == "VIDEO" and b == "S":
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
arg = "ɢʀᴏᴜᴘ {} call".format(c)
a1 = "{}".format(str(contact.displayName))
a2 = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
a3 = "{}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#ffff00"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "0px",
#"borderColor": "#ff0000",
"cornerRadius": "3px",
"offsetTop": "3px",
"offsetStart": "2px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢CALL VIDEO",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "1px",
#"borderColor": "#00ff00",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": a1,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "20px",
"offsetTop": "265px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴋᴇᴛᴀᴜᴀɴ ᴊᴏɴᴇs ɴʏᴀ ᴅɪᴀ",
"size": "xxs",
"color": "#ffffff",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#000000"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": a2,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#00ff00"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": a3,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "70px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "0px",
#"borderColor": "#00ff00",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "0px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#00ff00",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
if c == 'AUDIO' and b == "S":
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
arg = "ɢʀᴏᴜᴘ {} call".format(c)
satu = "{}".format(str(contact.displayName))
dua = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
tiga = "{}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#000000"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "2px",
#"borderColor": "#ff0000",
"cornerRadius": "3px",
"offsetTop": "5px",
"offsetStart": "3px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢CALL AUDIO",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "2px",
#"borderColor": "#ff0000",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": satu,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "20px",
"offsetTop": "266px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴋᴇᴛᴀᴜᴀɴ ᴊᴏɴᴇs ɴʏᴀ ᴅɪᴀ",
"size": "xxs",
"color": "#ffff00",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#000000"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": dua,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#ff0000"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": tiga,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "90px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "2px",
"borderColor": "#ff0000",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "9px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#ff0000",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
if c == 'LIVE' and b == 'S':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
arg = "ɢʀᴏᴜᴘ {} call".format(c)
c1 = "{}".format(str(contact.displayName))
c2 = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
c3 = "{}".format(datetime.strftime(timeNow,'%d-%m-%Y'))
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#000000"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "2px",
#"borderColor": "#ffff00",
"cornerRadius": "3px",
"offsetTop": "5px",
"offsetStart": "3px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢LIVE VIDEO",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "2px",
#"borderColor": "#ffff00",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": c1,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "1px",
"borderColor": "#ffff00",
"cornerRadius": "20px",
"offsetTop": "265px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "sɪᴊᴏɴᴇs ʟᴀɢɪ ᴄᴀᴘᴇʀ ʟɪᴠᴇ",
"size": "xxs",
"color": "#ffffff",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#2F4F4Fcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ffff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ffff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": c2,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#ffff00"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": c3,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "90px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "2px",
"borderColor": "#ffff00",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "9px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#ffff00",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
else:
mills = int(msg.contentMetadata["DURATION"])
seconds = (mills/1000)%60
if c == "VIDEO" and b == "E":
# tz = pytz.timezone("Asia/Jakarta")
# timeNow = datetime.now(tz=tz)
arg ="ɢʀᴏᴜᴘ {} call".format(c)
b1 = "{}".format(str(contact.displayName))
b2 = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
b3 = "{}".format(seconds)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#000000"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "2px",
#"borderColor": "#00ff00",
"cornerRadius": "3px",
"offsetTop": "5px",
"offsetStart": "3px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢END CALLVI",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "2px",
#"borderColor": "#00ff00",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": b1,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "20px",
"offsetTop": "265px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴛᴜᴋᴀɴɢ ɴɪᴋᴜɴɢ ᴛᴜʀᴜɴ",
"size": "xxs",
"color": "#ffffff",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#2F4F4Fcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#00ff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": b2,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#00ff00"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": b3,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "55px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "2px",
"borderColor": "#00ff00",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "9px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#00ff00",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
if c == "AUDIO" and b == "E":
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
arg ="ɢʀᴏᴜᴘ {} call".format(c)
empat = "{}".format(str(contact.displayName))
lima = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
enam = "{}".format(seconds)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#000000"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "2px",
#"borderColor": "#ff0000",
"cornerRadius": "3px",
"offsetTop": "5px",
"offsetStart": "3px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢END CALL",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "2px",
#"borderColor": "#ff0000",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": empat,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "20px",
"offsetTop": "265px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴛᴜᴋᴀɴɢ ɴɪᴋᴜɴɢ ᴛᴜʀᴜɴ",
"size": "xxs",
"color": "#ffffff",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#2F4F4Fcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ff0000",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": lima,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#ff0000"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": enam,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "90px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "2px",
"borderColor": "#ff0000",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "9px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#ff0000",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
if c == "LIVE" and b == "E":
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
arg ="ɢʀᴏᴜᴘ {} call".format(c)
d1 = "{}".format(str(contact.displayName))
d2 = "{}".format(datetime.strftime(timeNow,'%H:%M:%S'))
d3 = "{}".format(seconds)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:4",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#000000"
}
],
"position": "absolute",
"width": "148px",
"height": "221px",
"borderWidth": "2px",
#"borderColor": "#fi9fff00",
"cornerRadius": "3px",
"offsetTop": "5px",
"offsetStart": "3px",
#"backgroundColor": "#B0C4DEcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "1:2",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "154px",
"height": "190px",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "5px",
"offsetTop": "10px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📞⃢END LIVE",
"size": "xxs",
"color": "#ffffff",
"offsetTop": "1px",
"offsetStart": "15px",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "93px",
"height": "25px",
#"backgroundColor": "#000000",
"offsetTop": "0px",
"offsetStart": "30px",
"borderWidth": "2px",
#"borderColor": "#ffff00",
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": d1,
"size": "xxs",
"offsetTop": "2px",
"offsetStart": "3px",
"color": "#00ff00",
"weight": "bold",
"style": "italic"
}
],
"position": "absolute",
"width": "154px",
"height": "23px",
"backgroundColor": "#000000",
"borderWidth": "2px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "202px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xxs",
"color": "#6699cc"
}
],
"position": "absolute",
"width": "76px",
"height": "40px",
#"backgroundColor": "#6699cc",
"borderWidth": "2px",
"borderColor": "#ffff00",
"cornerRadius": "20px",
"offsetTop": "265px",
"offsetStart": "39px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴋᴀsɪᴀɴ sɪᴊᴏɴᴇs ʟɪᴠᴇ ɢᴅᴀ ʏɢ ʟɪᴀᴛ",
"size": "xxs",
"color": "#ffffff",
"wrap": True,
"weight": "bold",
"style": "italic",
"align": "center"
}
],
"position": "absolute",
"width": "154px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#000000",
"cornerRadius": "2px",
"offsetTop": "225px",
"offsetStart": "1px",
#"backgroundColor": "#2F4F4Fcc"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ffff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "1px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectRatio": "2:3",
"aspectMode": "cover"
}
],
"width": "40px",
"height": "40px",
"borderWidth": "1px",
"borderColor": "#ffff00",
"cornerRadius": "100px",
"offsetTop": "265px",
"offsetStart": "114px",
"position": "absolute"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": d2,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "2px"
}
],
"position": "absolute",
"width": "55px",
"height": "20px",
#"backgroundColor": "#F08080cc",
"offsetTop": "263px",
"offsetStart": "55px",
#"borderColor": "#ffff00"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": d3,
"size": "xxs",
"color": "#ffffff",
"offsetTop": "0px",
"offsetStart": "2px"
}
],
"width": "55px",
"height": "17px",
#"backgroundColor": "#ffffff",
"position": "absolute",
"offsetTop": "283px",
"offsetStart": "55px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
"size": "full",
"aspectRatio": "4:4",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "135px",
"height": "105px",
"borderWidth": "2px",
"borderColor": "#ffff00",
"cornerRadius": "10px",
"offsetTop": "900px",
"offsetStart": "9px"
}
],
"paddingAll": "0px",
"borderWidth": "2px",
"borderColor": "#ffff00",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#C0C0C0"
}
}
}
]
}
}
cl.postTemplate(msg.to, data)
except Exception as error:
print (error)
        # Auto-join: op.type 13/124 = group invitation created.  When this
        # account (mid) is among the invitees and "autoJoin" is enabled,
        # accept the invitation and thank the group by name.
        if op.type == 13 or op.type == 124:
            if wait["autoJoin"] and mid in op.param3:
                group = cl.getGroup(op.param1)
                # Re-enable notifications for the newly joined group.
                group.notificationDisabled = False
                cl.acceptGroupInvitation(op.param1)
                # chat.chats[0].extra.groupExtra.preventedJoinByTicket = False
                cl.updateGroup(group)
                # Re-fetch after the update to get the current group name.
                ginfo = cl.getGroup(op.param1)
                sendTextTemplate12(op.param1,"ᴛʜᴀɴᴋs" + str(ginfo.name))
        # Op type 0 carries no actionable payload; stop processing here.
        if op.type == 0:
            return
        # Auto-add: op.type 5 = a user added this account as a friend.
        # Greet the new friend with the configured "message1", unless the
        # adder is one of our own privileged accounts.
        if op.type == 5:
            if wait["autoAdd"] == True:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    # Skip the greeting when message1 is effectively blank/unset.
                    if (wait["message1"] in [" "," ","\n",None]):
                        pass
                    else:
                        cl.sendMessage(op.param1, wait["message1"])
        # Auto-reject: decline group invitations addressed to this account
        # unless the inviter is one of our own privileged accounts.
        if op.type == 13 or op.type == 124:
            if mid in op.param3:
                if wait["autoReject"] == True:
                    if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                        cl.rejectGroupInvitation(op.param1)
        # Blacklist enforcement on invitations (op.type 13/124):
        #  - if the INVITER (op.param2) is blacklisted, cancel the invite and
        #    kick the inviter;
        #  - if an INVITEE (op.param3) is blacklisted, cancel their invite and
        #    punish the inviter.
        # NOTE(review): "cancelGroupInvitetion" looks misspelled — verify it is
        # the actual method name exposed by the client library before renaming.
        if op.type == 13 or op.type == 124:
            if op.param2 in wait["blacklist"]:
                try:
                    cl.cancelGroupInvitetion(op.param1,[op.param3])
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
                except:
                    # Fallback: sweep the group's pending invitees and cancel
                    # every blacklisted one individually.
                    try:
                        group = cl.getGroup(op.param1)
                        gMembMids = [contact.mid for contact in group.invitee]
                        for _dn in gMembMids:
                            if _dn in wait["blacklist"]:
                                cl.cancelGroupInvitetion(op.param1,[_dn])
                    except:
                        cl.cancelGroupInvitetion(op.param1,[op.param3])
                        cl.kickoutFromGroup(op.param1,[op.param2])
                        wait["blacklist"][op.param2] = True
            if op.param3 in wait["blacklist"]:
                try:
                    cl.cancelGroupInvitetion(op.param1,[op.param3])
                    cl.inviteIntoGroup(op.param1,[op.param3])
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
                except:
                    try:
                        group = cl.getGroup(op.param1)
                        gMembMids = [contact.mid for contact in group.invitee]
                        for _dn in gMembMids:
                            if _dn in wait["blacklist"]:
                                cl.cancelGroupInvitetion(op.param1,[_dn])
                    except:
                        cl.cancelGroupInvitetion(op.param1,[op.param3])
                        cl.kickoutFromGroup(op.param1,[op.param2])
                        wait["blacklist"][op.param2] = True
        # op.type 17/130 = a member joined the group: if the joiner is
        # blacklisted, kick them immediately and announce it (best effort).
        if op.type == 17 or op.type == 130:
            if op.param2 in wait["blacklist"]:
                try:
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    cl.sendMessage(op.param1,"そ、[ʙʟᴀᴄᴋʟɪsᴛ]そうですか(`・ω・´)")
                except:
                    pass
        # Backup mode: op.type 32/126 = member kicked.  If a protected bot
        # account (op.param3) was kicked by a non-privileged actor, blacklist
        # the actor, kick them back and re-invite the bot (best effort).
        if op.type == 32 or op.type == 126:
            if wait["backup"] == True:
                if op.param3 in Bots:
                    if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                        wait["blacklist"][op.param2] = True
                        try:
                            if op.param3 not in wait["blacklist"]:
                                cl.kickoutFromGroup(op.param1,[op.param2])
                                cl.inviteIntoGroup(op.param1,[op.param3])
                        except:
                            pass
            # Stop handling this operation regardless of the backup flag.
            return
if op.type == 19 or op.type == 133 or op.type == 32:
if mid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in creator:
pass
if op.param2 in admin:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.acceptGroupInvitation(op.param1)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
        # Protection for creator accounts: if a creator (op.param3) is kicked
        # by a non-privileged actor, re-add/re-invite the creator, kick the
        # actor and blacklist them.
        if op.type == 19 or op.type == 133 or op.type == 32:
            if op.param3 in creator:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    cl.findAndAddContactsByMid(op.param3)
                    cl.inviteIntoGroup(op.param1,[op.param3])
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
        # Same protection for admin accounts.
        if op.type == 19 or op.type == 133 or op.type == 32:
            if op.param3 in admin:
                if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
                    cl.findAndAddContactsByMid(op.param3)
                    cl.inviteIntoGroup(op.param1,[op.param3])
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    wait["blacklist"][op.param2] = True
        # op.type 55 = message read notification.  When read tracking is armed
        # for this chat (op.param1 in read["readPoint"]), record the reader
        # (op.param2) once; swallow any KeyError from unarmed chats.
        if op.type == 55:
            try:
                if op.param1 in read["readPoint"]:
                    if op.param2 in read["readMember"][op.param1]:
                        pass
                    else:
                        read["readMember"][op.param1][op.param2] = True
                else:
                    pass
            except:
                pass
        # Also on read notifications: kick blacklisted readers on sight
        # (best effort) and announce it.
        if op.type == 55:
            if op.param2 in wait["blacklist"]:
                try:
                    cl.kickoutFromGroup(op.param1,[op.param2])
                    cl.sendMessage(op.param1,"そ、[ʙʟᴀᴄᴋʟɪsᴛ]そうですか(`・ω・´)")
                except:
                    pass
        # "Tumbal" (sacrifice-account) protection: if a tumbal account is
        # kicked by a non-privileged actor, blacklist the actor, kick them
        # and re-invite the whole Tumbal list (best effort).
        if op.type == 32 or op.type == 126:
            if wait["tumbal"] == True:
                if op.param3 in Tumbal:
                    if op.param2 not in Bots and op.param2 not in creator and op.param2 not in admin:
                        wait["blacklist"][op.param2] = True
                        try:
                            cl.kickoutFromGroup(op.param1,[op.param2])
                            cl.inviteIntoGroup(op.param1,Tumbal)
                        except:
                            pass
            return
        if op.param3 in mid: # the bot/admin account itself got kicked
            # NOTE(review): these are independent `if` statements, so an actor
            # in Bots (but not in mid) still reaches the else branch — likely
            # the same fallthrough bug as the self-defence handler above.
            if op.param2 in Bots:
                pass
            if op.param2 in mid:
                pass
            else:
                random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
                cl.inviteIntoGroup(op.param1,[op.param3])
        if op.param3 in Amid: # the main account got kicked
            # Recover by temporarily opening the join-by-ticket door:
            # kick the actor, disable the ticket guard, rejoin via a freshly
            # issued ticket, then re-enable the guard.
            G = random.choice(KAC).getGroup(op.param1)
            random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
            G.preventJoinByTicket = False
            random.choice(ABC).updateGroup(G)
            Ticket = random.choice(KAC).reissueGroupTicket(op.param1)
            cl.acceptGroupInvitationByTicket(op.param1,Ticket)
            time.sleep(0.01)
            G.preventJoinByTicket = True
            random.choice(KAC).updateGroup(G)
            random.choice(KAC).updateGroup(G)
            wait["blacklist"][op.param2] = True
        # NOTE(review): `admin in op.param3` tests whether the admin LIST is a
        # member of op.param3 — probably meant `op.param3 in admin`.  All four
        # branches below are no-ops as written; confirm intent before fixing.
        if admin in op.param3:
            if op.param2 in Bots:
                pass
            if op.param2 in owner:
                pass
            if op.param2 in admin:
                pass
            if op.param2 in staff:
                pass
if op.type == 65:
if wait["Unsend"] == True:
try:
at = op.param1
msg_id = op.param2
if msg_id in msg_dict:
if msg_dict[msg_id]["from"]:
if msg_dict[msg_id]["text"] == 'ɢᴀᴍʙᴀʀʏᴀ ɪʟᴀɴɢ':
ginfo = cl.getGroup(at)
ika = cl.getContact(msg_dict[msg_id]["from"])
zx = ""
zxc = ""
zx2 = []
xpesan = "ᴘᴇsᴀɴ ᴅɪʜᴀᴘᴜs\nᴘᴇɴɢɪʀɪᴍ: "
ret_ = "ɴᴀᴍᴀ ɢʀᴜᴘ: {}".format(str(ginfo.name))
ret_ += "\nᴊᴀᴍ sʜᴀʀᴇ: {}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
ik = str(ika.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ika.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
cl.sendMessage(at, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
else:
ginfo = cl.getGroup(at)
ika = cl.getContact(msg_dict[msg_id]["from"])
ika1 = "🚹{}".format(str(ika.displayName))
ika2 = "🏠:{}".format(str(ginfo.name))
ika3 = "🕙{}".format(dt_to_str(cTime_to_datetime(msg_dict[msg_id]["createdTime"])))
seber = "═══「 ᴘᴇsᴀɴ ᴅɪʜᴀᴘᴜs 」═══\n{}".format(str(msg_dict[msg_id]["text"]))
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"styles": {
"body": {
"backgroundColor": "#0000ff"
},
"footer": {
"backgroundColor": "#2f2f4f"
}
},
"type": "bubble",
"size": "micro",
"body": {
"contents": [
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "image",
"url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
"size": "xxs"
},{
"type": "text",
"text": "sᴇʟғʙᴏᴛ",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴛᴇᴍᴘʟᴀᴛᴇ",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴠᴇʀsɪ³",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},
{
"type": "image",
"url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
"size": "xxs"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"text": "📧📧📧",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#33ffff"
},
{
"text": "🖼️🖼️🖼️",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#33ffff"
},
{
"text": "📧📧📧",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"url": "https://obs.line-scdn.net/{}".format(str(ika.pictureStatus)),
"type": "image",
"size": "xxs",
"flex": 0
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "text",
"text": ika1,
"weight": "bold",
"color": "#33ffff",
"align": "center",
"size": "xxs",
"flex": 0
},{
"type": "separator",
"color": "#33ffff"
},{
"type": "text",
"text": ika3, #"🕙"+ datetime.strftime(timeNow,'%H:%M:%S'+"🕙"),
"weight": "bold",
"color": "#ccffff",
#"align": "center",
"size": "xxs",
"flex": 0
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "text",
"text": ika2, #"{}".format(cl.getContact(mid).displayName),
"weight": "bold",
"color": "#ffff00",
#"align": "center",
"size": "xxs",
"flex": 0
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"text": seber,
"size": "xxs",
# "align": "center",
"color": "#00ff00",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"text": "📧📧📧",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#33ffff"
},
{
"text": "🖼️🖼️🖼️",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#33ffff"
},
{
"text": "📧📧📧",
"size": "xxs",
"color": "#FF9900",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "image",
"url": "https://i.ibb.co/XWQd8rj/20190625-201419.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "https://youtube.com",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/ZHtFDts/20190427-185307.png", #chathttps://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/chat" #"http://line.me/ti/p/~bancat525",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
"size": "xl",
"action": {
"type": "uri",
"uri": "Https://smule.com/BomberBSSI",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/cameraRoll/multi"
},
"flex": 1
},
{
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/1sGhJdC/20190428-232658.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/timeline"
},
"flex": 1
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "image",
"url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
"size": "xxs"
},{
"type": "text",
"text": "ᴛʜᴀɴᴋᴢ ғᴏʀ",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "sᴜᴘᴏʀᴛ",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴛᴇᴀᴍ",
"weight": "bold",
"color": "#ccff00",
"size": "xxs",
"flex": 0
},
{
"type": "image",
"url": "https://media.tenor.com/images/3cfcb167ed18a35f3a52f70e44fdf6c0/tenor.gif",
"size": "xxs"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator", #batas APK
"color": "#33ffff"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
}
}
}
cl.postTemplate(at, data)
cl.sendImage(at, msg_dict[msg_id]["data"])
del msg_dict[msg_id]
except Exception as e:
print(e)
        # op.type 65 = message unsent.  If the unsent message was a sticker we
        # captured earlier (msg_dict1, filled by the op.type 26 handler),
        # repost its recorded info text and the downloaded sticker image,
        # then drop the cache entry.
        if op.type == 65:
            if wait["Unsend"] == True:
                try:
                    at = op.param1
                    msg_id = op.param2
                    if msg_id in msg_dict1:
                        if msg_dict1[msg_id]["from"]:
                            ginfo = cl.getGroup(at)
                            ryan = cl.getContact(msg_dict1[msg_id]["from"])
                            ret_ = "╔══「✯sᴛɪᴄᴋᴇʀ ɪɴғᴏ✯」\n"
                            ret_ += "┣[]►🚹: {}".format(str(ryan.displayName))
                            ret_ += "\n┣[]►🏠: {}".format(str(ginfo.name))
                            ret_ += "\n┣[]►🕘: {}".format(dt_to_str(cTime_to_datetime(msg_dict1[msg_id]["createdTime"])))
                            ret_ += "\n╚══「✯ᴜɴsᴇɴᴅ ғɪɴɪsʜ✯」"
                            ret_ += "{}".format(str(msg_dict1[msg_id]["text"]))
                            sendTextTemplate12(at, str(ret_))
                            cl.sendImage(at, msg_dict1[msg_id]["data"])
                            # Free the cache entry once replayed.
                            del msg_dict1[msg_id]
                except Exception as e:
                    print(e)
        # op.type 26 = incoming message.  Cache every text/image/sticker
        # message so the op.type 65 handlers above can replay it if the
        # sender later unsends it.
        if op.type == 26:
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            if msg.toType == 0 or msg.toType == 1 or msg.toType == 2:
                # Resolve the chat to answer in: for 1:1 chats (toType 0) reply
                # to the peer, otherwise to the room/group itself.
                if msg.toType == 0:
                    if sender != cl.profile.mid:
                        to = sender
                    else:
                        to = receiver
                elif msg.toType == 1:
                    to = receiver
                elif msg.toType == 2:
                    to = receiver
                # contentType 0 = plain text: store the full message record.
                if msg.contentType == 0:
                    msg_dict[msg.id] = {"text": msg.text, "from": msg._from, "createdTime": msg.createdTime, "contentType": msg.contentType, "contentMetadata": msg.contentMetadata}
                # contentType 1 = image: download it now so it can be
                # reposted after an unsend.
                if msg.contentType == 1:
                    path = cl.downloadObjectMsg(msg_id)
                    msg_dict[msg.id] = {"text":'ɢᴀᴍʙᴀʀʏᴀ ᴅɪʙᴀᴡᴀʜ',"data":path,"from":msg._from,"createdTime":msg.createdTime}
                # contentType 7 = sticker: build an info text and download the
                # sticker PNG from LINE's sticker shop CDN.
                if msg.contentType == 7:
                    stk_id = msg.contentMetadata["STKID"]
                    stk_ver = msg.contentMetadata["STKVER"]
                    pkg_id = msg.contentMetadata["STKPKGID"]
                    ret_ = "\n╔══「✯sᴛɪᴄᴋᴇʀ ɪɴғᴏ✯]"
                    ret_ += "\n┣[]►sᴛɪᴄᴋᴇʀ ɪᴅ: {}".format(stk_id)
                    ret_ += "\n┣[]►sᴛɪᴄᴋᴇʀ ᴠᴇʀsɪᴏɴ: {}".format(stk_ver)
                    ret_ += "\n┣[]►sᴛɪᴄᴋᴇʀ: {}".format(pkg_id)
                    ret_ += "\n┣[]►ᴜʀʟ:{}".format(pkg_id)
                    ret_ += "\n╚══「✯ᴜɴsᴇɴᴅ ғɪɴɪsʜ✯」"
                    query = int(stk_id)
                    # NOTE(review): int(stk_id) always yields an int, so this
                    # guard is always true (would raise above if not numeric).
                    if type(query) == int:
                        data = 'https://stickershop.line-scdn.net/stickershop/v1/sticker/'+str(query)+'/ANDROID/sticker.png'
                        path = cl.downloadFileURL(data)
                        msg_dict1[msg.id] = {"text":str(ret_),"data":path,"from":msg._from,"createdTime":msg.createdTime}
#____________________________________________________________________
        if op.type == 17 or op.type == 130:
            # op.type 17/130 — presumably "member joined group" (welcomeMembers()
            # is called below) — TODO confirm op-code meaning.
            # Only acts on groups registered in the `welcome` collection.
            if op.param1 in welcome:
                ginfo = cl.getGroup(op.param1)
                # cl.updateGroup(group)
                contact = cl.getContact(op.param2)
                cover = cl.getProfileCoverURL(op.param2)
                welcomeMembers(op.param1, [op.param2])
                tz = pytz.timezone("Asia/Jakarta")
                timeNow = datetime.now(tz=tz)
                # LINE flex message: one-bubble carousel showing the joiner's
                # profile cover, avatar, display name, the configured
                # wait["welcome"] text and the Jakarta wall-clock time.
                data = {
                    "type": "flex",
                    "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                    "contents":{
                        "type": "carousel",
                        "contents": [
                            {
                                "type": "bubble",
                                "size": "kilo",
                                "body": {
                                    "type": "box",
                                    "layout": "vertical",
                                    "contents": [
                                        {
                                            # Header strip: joiner's profile cover.
                                            "type": "image",
                                            "url": str(cl.getProfileCoverURL(op.param2)),
                                            "size": "full",
                                            "aspectMode": "cover",
                                            "aspectRatio": "4:1",
                                            "gravity": "top"
                                        },
                                        {
                                            # Yellow badge with the configured welcome text.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": wait["welcome"],
                                                    "weight": "bold",
                                                    "color": "#000000",
                                                    "size": "xxs",
                                                    "offsetTop": "0px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "cornerRadius": "0px",
                                            "offsetTop": "20px",
                                            "backgroundColor": "#ffff00",
                                            "offsetStart": "68px",
                                            "height": "16px",
                                            "width": "84px"
                                        },
                                        {
                                            # Bot signature badge.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                                    "weight": "bold",
                                                    "color": "#000000",
                                                    "size": "xxs",
                                                    "offsetTop": "0px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "cornerRadius": "0px",
                                            "offsetTop": "20px",
                                            "backgroundColor": "#ffff00",
                                            "offsetStart": "150px",
                                            "height": "16px",
                                            "width": "140px"
                                        },
                                        {
                                            # Joiner's display name.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "{}".format(cl.getContact(op.param2).displayName),
                                                    "color": "#ffff00",
                                                    "size": "xxs",
                                                    "margin": "xxl",
                                                    "style": "normal",
                                                    "decoration": "underline",
                                                    "offsetStart": "5px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "margin": "none",
                                            "width": "220px",
                                            #"backgroundColor": "#ffff00",
                                            "offsetTop": "26px",
                                            "offsetStart": "65px"
                                        },
                                        {
                                            # Large avatar — offsetBottom 900px pushes it
                                            # far off the visible canvas; looks intentional
                                            # (effectively hidden) — TODO confirm.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
                                                    "aspectMode": "cover",
                                                    "position": "absolute",
                                                    "size": "full"
                                                }
                                            ],
                                            "width": "154px",
                                            "height": "150px",
                                            "cornerRadius": "100px",
                                            "position": "absolute",
                                            "borderWidth": "3px",
                                            "borderColor": "#ff0000",
                                            "offsetBottom": "900px",
                                            "offsetStart": "1px"
                                        },
                                        {
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "W E L C O M E",
                                                    "size": "xs",
                                                    "color": "#ff0000",
                                                    "weight": "bold"
                                                }
                                            ],
                                            "position": "absolute",
                                            "offsetTop": "1px",
                                            "offsetStart": "90px"
                                        },
                                        {
                                            # Jakarta time stamp.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": ""+ datetime.strftime(timeNow,'%H:%M:%S'),
                                                    "size": "xs",
                                                    "color": "#ff0000",
                                                    "weight": "bold"
                                                }
                                            ],
                                            "position": "absolute",
                                            "offsetTop": "1px",
                                            "offsetStart": "185px"
                                        },
                                        {
                                            # Small framed avatar in the top-left corner.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
                                                    "aspectMode": "cover"
                                                }
                                            ],
                                            "position": "absolute",
                                            "width": "61px",
                                            "height": "61px",
                                            "offsetTop": "1px",
                                            "offsetStart": "1px",
                                            "offsetBottom": "1px",
                                            "borderColor": "#0000ff",
                                            "cornerRadius": "10px",
                                            "borderWidth": "2px"
                                        }
                                    ],
                                    "paddingAll": "0px",
                                    "borderWidth": "3px",
                                    "cornerRadius": "15px",
                                    "position": "relative",
                                    "borderColor": "#0000ff"
                                },
                                "styles": {
                                    "body": {
                                        "backgroundColor": "#ffffff"
                                    }
                                }
                            }
                        ]
                    }
                }
                cl.postTemplate(op.param1, data)
        if op.type == 15:
            # op.type 15 — presumably "member left group" (leaveMembers() is
            # called below) — TODO confirm op-code meaning.
            # Reuses the `welcome` registry to decide whether to announce.
            if op.param1 in welcome:
                ginfo = cl.getGroup(op.param1)
                # cl.updateGroup(group)
                contact = cl.getContact(op.param2)
                cover = cl.getProfileCoverURL(op.param2)
                leaveMembers(op.param1, [op.param2])
                tz = pytz.timezone("Asia/Jakarta")
                timeNow = datetime.now(tz=tz)
                # Flex card mirroring the welcome card, but with the
                # wait["autoLeave"] text and yellow framing.
                data = {
                    "type": "flex",
                    "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                    "contents":{
                        "type": "carousel",
                        "contents": [
                            {
                                "type": "bubble",
                                "size": "kilo",
                                "body": {
                                    "type": "box",
                                    "layout": "vertical",
                                    "contents": [
                                        {
                                            # Header strip: leaver's profile cover.
                                            "type": "image",
                                            "url": str(cl.getProfileCoverURL(op.param2)),
                                            "size": "full",
                                            "aspectMode": "cover",
                                            "aspectRatio": "4:1",
                                            "gravity": "top"
                                        },
                                        {
                                            # Badge with the configured leave text.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": wait["autoLeave"],
                                                    "weight": "bold",
                                                    "color": "#000000",
                                                    "size": "xxs",
                                                    "offsetTop": "0px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "cornerRadius": "0px",
                                            "offsetTop": "20px",
                                            "backgroundColor": "#ffff00",
                                            "offsetStart": "68px",
                                            "height": "16px",
                                            "width": "84px"
                                        },
                                        {
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "besok puskun amin",
                                                    "weight": "bold",
                                                    "color": "#000000",
                                                    "size": "xxs",
                                                    "offsetTop": "0px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "cornerRadius": "0px",
                                            "offsetTop": "20px",
                                            "backgroundColor": "#ffff00",
                                            "offsetStart": "140px",
                                            "height": "16px",
                                            "width": "140px"
                                        },
                                        {
                                            # Leaver's display name.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "{}".format(cl.getContact(op.param2).displayName),
                                                    "color": "#ffff00",
                                                    "size": "xxs",
                                                    "margin": "xxl",
                                                    "style": "normal",
                                                    "decoration": "underline",
                                                    "offsetStart": "5px"
                                                }
                                            ],
                                            "position": "absolute",
                                            "margin": "none",
                                            "width": "220px",
                                            #"backgroundColor": "#ffff00",
                                            "offsetTop": "26px",
                                            "offsetStart": "65px"
                                        },
                                        {
                                            # Large avatar pushed off-canvas (offsetBottom
                                            # 900px) — effectively hidden; TODO confirm.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
                                                    "aspectMode": "cover",
                                                    "position": "absolute",
                                                    "size": "full"
                                                }
                                            ],
                                            "width": "154px",
                                            "height": "150px",
                                            "cornerRadius": "100px",
                                            "position": "absolute",
                                            "borderWidth": "3px",
                                            "borderColor": "#ff0000",
                                            "offsetBottom": "900px",
                                            "offsetStart": "1px"
                                        },
                                        {
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": "A U T O L E F T",
                                                    "size": "xs",
                                                    "color": "#ff0000",
                                                    "weight": "bold"
                                                }
                                            ],
                                            "position": "absolute",
                                            "offsetTop": "1px",
                                            "offsetStart": "90px"
                                        },
                                        {
                                            # Jakarta time stamp.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "text",
                                                    "text": ""+ datetime.strftime(timeNow,'%H:%M:%S'),
                                                    "size": "xs",
                                                    "color": "#ff0000",
                                                    "weight": "bold"
                                                }
                                            ],
                                            "position": "absolute",
                                            "offsetTop": "1px",
                                            "offsetStart": "185px"
                                        },
                                        {
                                            # Small framed avatar, top-left corner.
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(cl.getContact(op.param2).pictureStatus),
                                                    "aspectMode": "cover"
                                                }
                                            ],
                                            "position": "absolute",
                                            "width": "61px",
                                            "height": "61px",
                                            "offsetTop": "1px",
                                            "offsetStart": "1px",
                                            "offsetBottom": "1px",
                                            "borderColor": "#ffff00",
                                            "cornerRadius": "10px",
                                            "borderWidth": "2px"
                                        }
                                    ],
                                    "paddingAll": "0px",
                                    "borderWidth": "3px",
                                    "cornerRadius": "15px",
                                    "position": "relative",
                                    "borderColor": "#ffff00"
                                },
                                "styles": {
                                    "body": {
                                        "backgroundColor": "#ffffff"
                                    }
                                }
                            }
                        ]
                    }
                }
                cl.postTemplate(op.param1, data)
        # ===== CCTV: read-detection ("sider") handler for op.type 55 =====
        if op.type == 55:
            # op.type 55 — presumably a "message read" notification; used here
            # to expose silent readers ("siders") — TODO confirm op-code meaning.
            if cctv['cyduk'][op.param1]==True:
                if op.param1 in cctv['point']:
                    Name = cl.getContact(op.param2).displayName
                    # Announce each reader only once per watch session.
                    if Name in cctv['sidermem'][op.param1]:
                        pass
                    else:
                        cctv['sidermem'][op.param1] += "\n~ " + Name
                        siderMembers(op.param1, [op.param2])
                        contact = cl.getContact(op.param2)
                        cover = cl.getProfileCoverURL(op.param2)
                        tz = pytz.timezone("Asia/Jakarta")
                        timeNow = datetime.now(tz=tz)
                        # Random accent colour for the card borders/text.
                        warna1 = ("#1AE501","#0108E5","#E50AE0","#E50F00","#DEE500","#47E1E5","#C82EF8","#FFF0F5","#B5ECEE","#E4EB85","#B0ABF9","#EAC6F4","#8BEFEF","#C0F785","#696969")
                        warnanya1 = random.choice(warna1)
                        # Flex card announcing the reader with the configured
                        # wait["mention"] text.
                        data = {
                            "type": "flex",
                            "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                            "contents":{
                                "type": "carousel",
                                "contents": [
                                    {
                                        "type": "bubble",
                                        "size": "micro",
                                        "body": {
                                            "type": "box",
                                            "layout": "vertical",
                                            "contents": [
                                                {
                                                    # Header: reader's avatar.
                                                    "type": "image",
                                                    "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                    "size": "full",
                                                    "aspectMode": "cover",
                                                    "aspectRatio": "2:1",
                                                    "gravity": "top"
                                                },
                                                {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        {
                                                            "type": "text",
                                                            "text": wait["mention"],
                                                            "weight": "bold",
                                                            "color": warnanya1,
                                                            "size": "xs",
                                                            "offsetTop": "0px"
                                                        }
                                                    ],
                                                    "position": "absolute",
                                                    "cornerRadius": "3px",
                                                    "offsetTop": "45px",
                                                    #"backgroundColor": "#00ff00",
                                                    "offsetStart": "33px",
                                                    "height": "16px",
                                                    "width": "84px"
                                                },
                                                {
                                                    # Reader's display name.
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        {
                                                            "type": "text",
                                                            "text": "{} ".format(contact.displayName),
                                                            "color": warnanya1,
                                                            "size": "xs",
                                                            "margin": "xxl",
                                                            "style": "normal",
                                                            "decoration": "underline",
                                                            "offsetStart": "5px"
                                                        }
                                                    ],
                                                    "position": "absolute",
                                                    "margin": "none",
                                                    "width": "220px",
                                                    "offsetTop": "40px",
                                                    "offsetStart": "33px"
                                                },
                                                {
                                                    # Avatar box pushed off-canvas
                                                    # (offsetStart 900px) — effectively
                                                    # hidden; TODO confirm intent.
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        {
                                                            "type": "image",
                                                            "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                            "aspectMode": "cover",
                                                            "position": "absolute",
                                                            "size": "full"
                                                        }
                                                    ],
                                                    "width": "154px",
                                                    "height": "150px",
                                                    "cornerRadius": "0px",
                                                    "position": "absolute",
                                                    "borderWidth": "3px",
                                                    "borderColor": warnanya1,
                                                    "offsetBottom": "40px",
                                                    "offsetStart": "900px"
                                                },
                                                {
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        {
                                                            "type": "text",
                                                            "text": " S I D E R",
                                                            "size": "xs",
                                                            "color": warnanya1,
                                                            "weight": "bold"
                                                        }
                                                    ],
                                                    "position": "absolute",
                                                    "offsetTop": "2px",
                                                    "offsetStart": "5px"
                                                },
                                                {
                                                    # Small round cover image, bottom-left.
                                                    "type": "box",
                                                    "layout": "vertical",
                                                    "contents": [
                                                        {
                                                            "type": "image",
                                                            "url": str(cl.getProfileCoverURL(op.param2)),
                                                            "aspectMode": "cover"
                                                        }
                                                    ],
                                                    "position": "absolute",
                                                    "width": "30px",
                                                    "offsetStart": "3px",
                                                    "cornerRadius": "100px",
                                                    "offsetBottom": "3px",
                                                    "borderColor": warnanya1,
                                                    "borderWidth": "1px"
                                                }
                                            ],
                                            "paddingAll": "0px",
                                            "borderWidth": "3px",
                                            "cornerRadius": "15px",
                                            "position": "relative",
                                            "borderColor": warnanya1,
                                        },
                                        "styles": {
                                            "body": {
                                                "backgroundColor": "#ffffff"
                                            }
                                        }
                                    }
                                ]
                            }
                        }
                        cl.postTemplate(op.param1, data)
        # ===== MENTION: tag-response handlers for op.type 26 =====
        if op.type == 26:
            # Second op.type 26 pass: trigger-word kick, talk-ban enforcement,
            # five configurable "you tagged me" responders, and sticker/contact
            # info dumps.
            msg = op.message
            text = msg.text
            msg_id = msg.id
            receiver = msg.to
            sender = msg._from
            if settings ["Aip"] == True:
                # Kick anyone who sends one of these trigger words verbatim.
                if msg.text in ["@zona","!bubarkan","Bypass"]:
                    cl.kickoutFromGroup(receiver,[sender])
            if wait["selfbot"] == True:
                if msg._from not in Bots:
                    if wait["talkban"] == True:
                        # Talk-banned sender: retry the kick up to four times,
                        # swallowing every failure.
                        if msg._from in wait["Talkblacklist"]:
                            try:
                                cl.kickoutFromGroup(msg.to, [msg._from])
                            except:
                                try:
                                    cl.kickoutFromGroup(msg.to, [msg._from])
                                except:
                                    try:
                                        cl.kickoutFromGroup(msg.to, [msg._from])
                                    except:
                                        try:
                                            cl.kickoutFromGroup(msg.to, [msg._from])
                                        except:
                                            pass
            if 'MENTION' in msg.contentMetadata.keys() != None:
                # NOTE(review): chained comparison — this evaluates as
                # ('MENTION' in keys()) and (keys() != None); it works, but the
                # `!= None` part is always true.
                if wait["detectMention"] == True:
                    # Responder #1: reply with a flex card when the bot (mid)
                    # is tagged.
                    contact = cl.getContact(msg._from)
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    cover = cl.getProfileCoverURL(sender)
                    name = re.findall(r'@(\w+)', msg.text)
                    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                    mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    for mention in mentionees:
                        if mention ['M'] in mid:
                            data = {
                                "type": "flex",
                                "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                "contents":{
                                    "type": "carousel",
                                    "contents": [
                                        {
                                            "type": "bubble",
                                            "size": "micro",
                                            "body": {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                    {
                                                        # Static background image.
                                                        "type": "image",
                                                        "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                        "size": "full",
                                                        "aspectMode": "cover",
                                                        "aspectRatio": "2:2",
                                                        "gravity": "top"
                                                    },
                                                    {
                                                        # Framed badge: configured response text.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": wait["Respontag"],
                                                                "weight": "bold",
                                                                "color": "#2bff44",
                                                                "size": "xs",
                                                                "offsetStart": "30px",
                                                                "offsetTop": "70px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "borderColor": "#ffff00",
                                                        "borderWidth": "2px",
                                                        "cornerRadius": "8px",
                                                        "offsetTop": "110px",
                                                        "backgroundColor": "#000000",
                                                        "offsetStart": "1px",
                                                        "height": "20px",
                                                        "width": "151px"
                                                    },
                                                    {
                                                        # Framed badge: tagger's display name.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "{} ".format(contact.displayName),
                                                                "color": "#2bff44",
                                                                "size": "xs",
                                                                "margin": "xxl",
                                                                "style": "normal",
                                                                "decoration": "underline",
                                                                "offsetStart": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "borderColor": "#ffff00",
                                                        "borderWidth": "2px",
                                                        "cornerRadius": "8px",
                                                        "margin": "none",
                                                        "backgroundColor": "#000000",
                                                        "height": "20px",
                                                        "width": "151px",
                                                        "offsetTop": "132px",
                                                        "offsetStart": "1px"
                                                    },
                                                    {
                                                        # Overlay copy of the response text.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": wait["Respontag"],
                                                                "weight": "bold",
                                                                "color": "#2bff44",
                                                                "size": "xxs",
                                                                "offsetStart": "0px",
                                                                "offsetTop": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "111px",
                                                        #"backgroundColor": "#000000",
                                                        "offsetStart": "5px",
                                                        "width": "120px"
                                                    },
                                                    {
                                                        # Overlay copy of the display name.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "{} ".format(contact.displayName),
                                                                "color": "#2bff44",
                                                                "size": "xxs",
                                                                "margin": "xxl",
                                                                "style": "normal",
                                                                "decoration": "underline",
                                                                "offsetStart": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "margin": "none",
                                                        #"backgroundColor": "#000000",
                                                        "width": "120px",
                                                        "offsetTop": "115px",
                                                        "offsetStart": "5px"
                                                    },
                                                    {
                                                        # Tagger's avatar panel.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "120px",
                                                        "height": "90px",
                                                        "offsetTop": "18px",
                                                        "cornerRadius": "5px",
                                                        "position": "absolute",
                                                        "borderWidth": "2px",
                                                        "borderColor": "#ffff00",
                                                        "offsetBottom": "1px",
                                                        "offsetStart": "2px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "R E S P O N",
                                                                "size": "xxs",
                                                                "color": "#2bff44",
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "2px",
                                                        "offsetStart": "3px"
                                                    },
                                                    {
                                                        # Jakarta time stamp.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": ""+ datetime.strftime(timeNow,'%H:%M:%S'),
                                                                "size": "xxs",
                                                                "color": "#2bff44",
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "2px",
                                                        "offsetStart": "90px"
                                                    },
                                                    {
                                                        # Three decorative round thumbnails
                                                        # stacked down the right edge.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "30px",
                                                        "height": "30px",
                                                        "offsetTop": "18px",
                                                        "cornerRadius": "100px",
                                                        "position": "absolute",
                                                        "borderWidth": "1px",
                                                        "borderColor": "#ffff00",
                                                        "offsetBottom": "1px",
                                                        "offsetStart": "123px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "30px",
                                                        "height": "30px",
                                                        "offsetTop": "48px",
                                                        "cornerRadius": "100px",
                                                        "position": "absolute",
                                                        "borderWidth": "1px",
                                                        "borderColor": "#ffff00",
                                                        "offsetBottom": "1px",
                                                        "offsetStart": "123px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "30px",
                                                        "height": "30px",
                                                        "offsetTop": "78px",
                                                        "cornerRadius": "100px",
                                                        "position": "absolute",
                                                        "borderWidth": "1px",
                                                        "borderColor": "#ffff00",
                                                        "offsetBottom": "1px",
                                                        "offsetStart": "123px"
                                                    },
                                                    {
                                                        # Cover image pushed off-canvas
                                                        # (offsetStart 900px) — hidden.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": str(cl.getProfileCoverURL(op.param2)),
                                                                "aspectMode": "cover"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "width": "30px",
                                                        "offsetStart": "900px",
                                                        "cornerRadius": "100px",
                                                        "offsetBottom": "3px",
                                                        "borderColor": "#ffff00",
                                                        "borderWidth": "1px"
                                                    }
                                                ],
                                                "paddingAll": "0px",
                                                "borderWidth": "3px",
                                                "cornerRadius": "15px",
                                                "position": "relative",
                                                "borderColor": "#ffff00"
                                            },
                                            "styles": {
                                                "body": {
                                                    "backgroundColor": "#000000"
                                                }
                                            }
                                        }
                                    ]
                                }
                            }
                            # NOTE(review): `to` is not assigned in this branch; it
                            # relies on the value left by the earlier op.type 26
                            # handler for the same op — verify this always holds.
                            cl.postTemplate(to, data)
                            break
            if 'MENTION' in msg.contentMetadata.keys() != None:
                # Kick non-bot users who tag one of the protected Bots accounts.
                if msg._from not in Bots:
                    if wait["Mentionkick"] == True:
                        name = re.findall(r'@(\w+)', msg.text)
                        mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                        mentionees = mention['MENTIONEES']
                        for mention in mentionees:
                            if mention ['M'] in Bots:
                                sendTextTemplate1(msg.to,"j̸͟͞a̸͟͞n̸͟͞g̸͟͞a̸͟͞n̸͟͞ t̸͟͞a̸͟͞g̸͟͞ g̸͟͞u̸͟͞a̸͟͞ n̸͟͞a̸͟͞n̸͟͞t̸͟͞i̸͟͞ k̸͟͞e̸͟͞j̸͟͞i̸͟͞t̸͟͞a̸͟͞k̸͟͞")
                                cl.kickoutFromGroup(to, [msg._from])
                                break
            if 'MENTION' in msg.contentMetadata.keys() != None:
                if wait["detectMention2"] == True:
                    # Responder #2: alternate flex card (wait["Respontag2"]).
                    contact = cl.getContact(msg._from)
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    cover = cl.getProfileCoverURL(sender)
                    name = re.findall(r'@(\w+)', msg.text)
                    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                    mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    for mention in mentionees:
                        if mention ['M'] in mid:
                            data = {
                                "type": "flex",
                                "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                "contents":{
                                    "type": "carousel",
                                    "contents": [
                                        {
                                            "type": "bubble",
                                            "size": "micro",
                                            "body": {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                    {
                                                        # Header: tagger's avatar.
                                                        "type": "image",
                                                        "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                        "size": "full",
                                                        "aspectMode": "cover",
                                                        "aspectRatio": "2:1",
                                                        "gravity": "top"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": wait["Respontag2"],
                                                                "weight": "bold",
                                                                "color": "#ffff00",
                                                                "size": "xs",
                                                                "offsetTop": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "cornerRadius": "3px",
                                                        "offsetTop": "45px",
                                                        #"backgroundColor": "#00ff00",
                                                        "offsetStart": "33px",
                                                        "height": "16px",
                                                        "width": "84px"
                                                    },
                                                    {
                                                        # Tagger's display name.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "{} ".format(contact.displayName),
                                                                "color": "#00ff00",
                                                                "size": "xs",
                                                                "margin": "xxl",
                                                                "style": "normal",
                                                                "decoration": "underline",
                                                                "offsetStart": "5px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "margin": "none",
                                                        "width": "220px",
                                                        "offsetTop": "40px",
                                                        "offsetStart": "33px"
                                                    },
                                                    {
                                                        # Avatar box off-canvas (hidden).
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "154px",
                                                        "height": "150px",
                                                        "cornerRadius": "0px",
                                                        "position": "absolute",
                                                        "borderWidth": "3px",
                                                        "borderColor": "#0000ff",
                                                        "offsetBottom": "40px",
                                                        "offsetStart": "900px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "R E S P O N 2",
                                                                "size": "xs",
                                                                "color": "#00ff00",
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "2px",
                                                        "offsetStart": "5px"
                                                    },
                                                    {
                                                        # Small round cover image.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": str(cl.getProfileCoverURL(op.param2)),
                                                                "aspectMode": "cover"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "width": "30px",
                                                        "offsetStart": "3px",
                                                        "cornerRadius": "100px",
                                                        "offsetBottom": "3px",
                                                        "borderColor": "#48D1CC",
                                                        "borderWidth": "1px"
                                                    }
                                                ],
                                                "paddingAll": "0px",
                                                "borderWidth": "3px",
                                                "cornerRadius": "15px",
                                                "position": "relative",
                                                "borderColor": "#48D1CC"
                                            },
                                            "styles": {
                                                "body": {
                                                    "backgroundColor": "#ffffff"
                                                }
                                            }
                                        }
                                    ]
                                }
                            }
                            # NOTE(review): `to` inherited from the earlier handler.
                            cl.postTemplate(to, data)
                            break
            if 'MENTION' in msg.contentMetadata.keys() != None:
                if wait["detectMention3"] == True:
                    # Responder #3: alternate flex card (wait["Respontag3"]).
                    contact = cl.getContact(msg._from)
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    cover = cl.getProfileCoverURL(sender)
                    name = re.findall(r'@(\w+)', msg.text)
                    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                    mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    for mention in mentionees:
                        if mention ['M'] in mid:
                            data ={
                                "type": "flex",
                                "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                "contents":{
                                    "type": "carousel",
                                    "contents": [
                                        {
                                            "type": "bubble",
                                            "size": "micro",
                                            "body": {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                    {
                                                        # Header strip: tagger's avatar.
                                                        "type": "image",
                                                        "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                        "size": "full",
                                                        "aspectMode": "cover",
                                                        "aspectRatio": "4:1",
                                                        "gravity": "top"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": wait["Respontag3"],
                                                                "weight": "bold",
                                                                "color": "#000000",
                                                                "size": "xxs",
                                                                "offsetTop": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "cornerRadius": "0px",
                                                        "offsetTop": "20px",
                                                        "backgroundColor": "#ffff00",
                                                        "offsetStart": "68px",
                                                        "height": "16px",
                                                        "width": "84px"
                                                    },
                                                    {
                                                        # Bot signature badge.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                                                "weight": "bold",
                                                                "color": "#000000",
                                                                "size": "xxs",
                                                                "offsetTop": "0px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "cornerRadius": "0px",
                                                        "offsetTop": "20px",
                                                        "backgroundColor": "#ffff00",
                                                        "offsetStart": "150px",
                                                        "height": "16px",
                                                        "width": "140px"
                                                    },
                                                    {
                                                        # Tagger's display name.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "{} ".format(contact.displayName),
                                                                "color": "#ffff00",
                                                                "size": "xxs",
                                                                "margin": "xxl",
                                                                "style": "normal",
                                                                "decoration": "underline",
                                                                "offsetStart": "5px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "margin": "none",
                                                        "width": "220px",
                                                        #"backgroundColor": "#ffff00",
                                                        "offsetTop": "26px",
                                                        "offsetStart": "65px"
                                                    },
                                                    {
                                                        # Small framed avatar, top-left.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "61px",
                                                        "height": "61px",
                                                        "cornerRadius": "10px",
                                                        "position": "absolute",
                                                        "borderWidth": "2px",
                                                        "borderColor": "#ff00ff",
                                                        "offsetBottom": "1px",
                                                        "offsetTop": "1px",
                                                        "offsetStart": "1px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "R E S P O N 3",
                                                                "size": "xs",
                                                                "color": "#00ff00",
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "1px",
                                                        "offsetStart": "90px"
                                                    },
                                                    {
                                                        # Jakarta time stamp.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": ""+ datetime.strftime(timeNow,'%H:%M:%S'),
                                                                "size": "xs",
                                                                "color": "#00ff00",
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "1px",
                                                        "offsetStart": "185px"
                                                    },
                                                    {
                                                        # Cover image pushed off-canvas (hidden).
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": str(cl.getProfileCoverURL(op.param2)),
                                                                "aspectMode": "cover"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "width": "61px",
                                                        "height": "61px",
                                                        "offsetTop": "900px",
                                                        "offsetStart": "1px",
                                                        "offsetBottom": "900px",
                                                        "borderColor": "#ff00ff",
                                                        "cornerRadius": "10px",
                                                        "borderWidth": "2px"
                                                    }
                                                ],
                                                "paddingAll": "0px",
                                                "borderWidth": "3px",
                                                "cornerRadius": "15px",
                                                "position": "relative",
                                                "borderColor": "#ff00ff"
                                            },
                                            "styles": {
                                                "body": {
                                                    "backgroundColor": "#ffffff"
                                                }
                                            }
                                        }
                                    ]
                                }
                            }
                            # NOTE(review): `to` inherited from the earlier handler.
                            cl.postTemplate(to, data)
                            break
            if 'MENTION' in msg.contentMetadata.keys() != None:
                if wait["detectMention4"] == True:
                    # Responder #4: lightweight "sentBy" text template instead of
                    # a flex card.
                    contact = cl.getContact(msg._from)
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    cover = cl.getProfileCoverURL(sender)
                    name = re.findall(r'@(\w+)', msg.text)
                    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                    mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    for mention in mentionees:
                        if mention ['M'] in mid:
                            data ={
                                "type": "text",
                                "text": wait["Respontag4"],
                                "sentBy": {
                                    "label": "{} ".format(contact.displayName),
                                    "iconUrl": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                    "linkUrl": "https://www.linkpicture.com/q/line_35331838825946.jpg"}}
                            # NOTE(review): `to` inherited from the earlier handler.
                            cl.postTemplate(to, data)
                            break
            if 'MENTION' in msg.contentMetadata.keys() != None:
                if wait["detectMention5"] == True:
                    # Responder #5: tall flex card with fixed "don't tag me"
                    # lines in fraktur letters and a random accent colour.
                    contact = cl.getContact(msg._from)
                    tz = pytz.timezone("Asia/Jakarta")
                    timeNow = datetime.now(tz=tz)
                    cover = cl.getProfileCoverURL(sender)
                    name = re.findall(r'@(\w+)', msg.text)
                    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                    mention = ast.literal_eval(msg.contentMetadata['MENTION'])
                    mentionees = mention['MENTIONEES']
                    for mention in mentionees:
                        if mention ['M'] in mid:
                            timeNow = datetime.now(tz=tz)
                            warna1 = ("#00ff00","#C0C0C0","#ffff00","#0000ff","#ff00ff","#00FFFF","#800000","#FF7F00","#BF00FF")
                            warnanya1 = random.choice(warna1)
                            data = {
                                "type": "flex",
                                "altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                "contents":{
                                    "type": "carousel",
                                    "contents": [
                                        {
                                            "type": "bubble",
                                            "size": "micro",
                                            "body": {
                                                "type": "box",
                                                "layout": "vertical",
                                                "contents": [
                                                    {
                                                        # Tall static background image.
                                                        "type": "image",
                                                        "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                        "size": "full",
                                                        "aspectMode": "cover",
                                                        "aspectRatio": "2:5",
                                                        "gravity": "top"
                                                    },
                                                    {
                                                        # Thin banner strip.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                                "aspectMode": "cover",
                                                                "aspectRatio": "2:2",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "152px",
                                                        "height": "25px",
                                                        "cornerRadius": "0px",
                                                        "position": "absolute",
                                                        "borderWidth": "2px",
                                                        "borderColor": warnanya1,
                                                        "offsetBottom": "40px",
                                                        "offsetTop": "172px",
                                                        "offsetStart": "1px"
                                                    },
                                                    {
                                                        # Lower panel behind the text lines.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
                                                                "aspectMode": "cover",
                                                                "aspectRatio": "2:4",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "152px",
                                                        "height": "160px",
                                                        "cornerRadius": "10px",
                                                        "position": "absolute",
                                                        "borderWidth": "2px",
                                                        "borderColor": warnanya1,
                                                        "offsetBottom": "40px",
                                                        "offsetTop": "222px",
                                                        "offsetStart": "1px"
                                                    },
                                                    {
                                                        # Fixed message lines (Indonesian,
                                                        # fraktur letters), stacked 10-12px apart.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔘𝔡𝔞𝔥 𝔡𝔦𝔟𝔦𝔩𝔞𝔫𝔤 𝔧𝔞𝔫𝔤𝔞𝔫 𝔱𝔞𝔤 𝔱𝔢𝔯𝔲𝔰",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "222px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔨𝔞𝔩𝔞𝔲 𝔨𝔞𝔫𝔤𝔢𝔫 𝔭𝔪 𝔞𝔧𝔞",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "234px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔫𝔤𝔢𝔶𝔢𝔩𝔫𝔶𝔞 𝔪𝔦𝔫𝔱𝔞 𝔞𝔪𝔭𝔲𝔫",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "244px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔬𝔧𝔬 𝔫𝔤𝔢𝔱𝔞𝔤 𝔱𝔯𝔲𝔰 𝔩𝔥𝔬 𝔩𝔢𝔨",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "254px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔨𝔞𝔩𝔞𝔲 𝔫𝔞𝔨𝔰𝔦𝔯 𝔟𝔦𝔩𝔞𝔫𝔤 𝔞𝔧𝔞",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "264px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔩𝔞𝔫𝔤𝔰𝔲𝔫𝔤 𝔡𝔦𝔡𝔢𝔭𝔞𝔫",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "274px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "𝔭𝔢𝔫𝔤𝔥𝔲𝔩𝔲",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "284px",
                                                        "offsetStart": "4px"
                                                    },
                                                    {
                                                        # Tagger's display name.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "{} ".format(contact.displayName),
                                                                "color": warnanya1,
                                                                "size": "xs",
                                                                "margin": "xxl",
                                                                "style": "normal",
                                                                "decoration": "underline",
                                                                "offsetStart": "5px"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "margin": "none",
                                                        "width": "220px",
                                                        "offsetTop": "155px",
                                                        "offsetStart": "5px"
                                                    },
                                                    {
                                                        # Tagger's avatar panel.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                                "aspectMode": "cover",
                                                                "aspectRatio": "2:3",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "152px",
                                                        "height": "152px",
                                                        "cornerRadius": "0px",
                                                        "position": "absolute",
                                                        "borderWidth": "2px",
                                                        "borderColor": warnanya1,
                                                        "offsetBottom": "40px",
                                                        "offsetTop": "20px",
                                                        "offsetStart": "1px"
                                                    },
                                                    {
                                                        # Avatar box off-canvas (hidden).
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": "https://obs.line-scdn.net/{}".format(contact.pictureStatus),
                                                                "aspectMode": "cover",
                                                                "position": "absolute",
                                                                "size": "full"
                                                            }
                                                        ],
                                                        "width": "154px",
                                                        "height": "150px",
                                                        "cornerRadius": "0px",
                                                        "position": "absolute",
                                                        "borderWidth": "3px",
                                                        "borderColor": warnanya1,
                                                        "offsetBottom": "40px",
                                                        "offsetStart": "900px"
                                                    },
                                                    {
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "R E S P O N 5",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "2px",
                                                        "offsetStart": "5px"
                                                    },
                                                    {
                                                        # Jakarta time stamp.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": ""+ datetime.strftime(timeNow,'%H:%M:%S'),
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "2px",
                                                        "offsetStart": "90px"
                                                    },
                                                    {
                                                        # Bot signature.
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "text",
                                                                "text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
                                                                "size": "xs",
                                                                "color": warnanya1,
                                                                "weight": "bold"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "offsetTop": "200px",
                                                        "offsetStart": "10px"
                                                    },
                                                    {
                                                        # Cover image off-canvas (hidden).
                                                        "type": "box",
                                                        "layout": "vertical",
                                                        "contents": [
                                                            {
                                                                "type": "image",
                                                                "url": str(cl.getProfileCoverURL(op.param2)),
                                                                "aspectMode": "cover"
                                                            }
                                                        ],
                                                        "position": "absolute",
                                                        "width": "30px",
                                                        "offsetStart": "300px",
                                                        "cornerRadius": "100px",
                                                        "offsetBottom": "300px",
                                                        "borderColor": warnanya1,
                                                        "borderWidth": "1px"
                                                    }
                                                ],
                                                "paddingAll": "0px",
                                                "borderWidth": "3px",
                                                "cornerRadius": "15px",
                                                "position": "relative",
                                                "borderColor": warnanya1,
                                            },
                                            "styles": {
                                                "body": {
                                                    "backgroundColor": "#ffffff"
                                                }
                                            }
                                        }
                                    ]
                                }
                            }
                            # NOTE(review): `to` inherited from the earlier handler.
                            cl.postTemplate(to, data)
                            break
            if msg.contentType == 7:
                # Sticker received: dump its ids and a shop link.
                if wait["sticker"] == True:
                    msg.contentType = 0
                    cl.sendMessage(msg.to,"「Cek ID Sticker」\n°❂° STKID : " + msg.contentMetadata["STKID"] + "\n°❂° STKPKGID : " + msg.contentMetadata["STKPKGID"] + "\n°❂° STKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
            if msg.contentType == 13:
                # Contact card received: dump mid, then (if present) the full
                # profile info plus the avatar image.
                if wait["contact"] == True:
                    msg.contentType = 0
                    cl.sendMessage(msg.to,msg.contentMetadata["mid"])
                    if 'displayName' in msg.contentMetadata:
                        contact = cl.getContact(msg.contentMetadata["mid"])
                        path = cl.getContact(msg.contentMetadata["mid"]).picturePath
                        image = 'http://dl.profile.line.naver.jp'+path
                        sendTextTemplate12(msg.to,"°❂° Nama : " + msg.contentMetadata["displayName"] + "\n°❂° MID : " + msg.contentMetadata["mid"] + "\n°❂° Status Msg : " + contact.statusMessage + "\n°❂° Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
                        cl.sendImageWithURL(msg.to, image)
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 and msg.toType == 2:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
elif msg.toType == 1:
to = receiver
elif msg.toType == 2:
to = receiver
if msg.contentType == 0:
to = receiver
if msg.contentType == 16:
if settings["checkPost"] == True:
url = msg.contentMetadata["postEndUrl"]
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
warna6 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya6 = random.choice(warna6)
warna4 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya4 = random.choice(warna4)
warna5 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya5 = random.choice(warna5)
cl.likePost(url[25:58], url[66:], likeType=1004)
cl.createComment(url[25:58], url[66:], wait["comment"])
cover = cl.getProfileCoverURL(sender)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "kilo",
"body": {
"backgroundColor": warnanya4,
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 1
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "sm",
"aspectMode": "cover",
"aspectRatio": "2:2",
"gravity": "bottom",
"action": {
"uri": "line://nv/profilePopup/mid=u8e603fce8dd01a68eeb8837342618f6d",
"type": "uri",
}
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"gravity": "bottom",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2",
"type": "uri",
}}],
"position": "absolute",
"cornerRadius": "8px",
"offsetTop": "0px",
"offsetStart": "0px",
"height": "0px",
"width": "0px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg", #"https://www.jimphicdesigns.com/downloads/imgs-mockup/bouncy-ball-change-colors-animation.gif",
"gravity": "bottom",
"size": "sm",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2",
"type": "uri",
}}],
"position": "absolute",
"cornerRadius": "8px",
"offsetTop": "-100px",
"offsetStart": "0px",
"height": "110px",
"width": "160px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 2
"url": cover, #"https://obs.line-scdn.net/{}".format(cl.getContact(sender).pictureStatus),
"gravity": "bottom",
"size": "xs",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"type": "uri",
}}],
"position": "absolute",
"cornerRadius": "5px",
"offsetTop": "5px",
"offsetStart": "2px",
"height": "150px",
"width": "50px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 2
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(sender).pictureStatus),
"gravity": "bottom",
"size": "xs",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2",
"type": "uri",
}}],
"position": "absolute",
"borderWidth": "2px",
"cornerRadius": "8px",
"offsetTop": "2px",
"offsetStart": "2px",
"borderColor": warnanya5,
"height": "60px",
"width": "60px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": text,
"weight": "bold",
"color": warnanya4,
"size": "xs",
"offsetStart": "30px",
"offsetTop": "70px"
}
],
"position": "absolute",
"borderColor": warnanya6,
"borderWidth": "2px",
"cornerRadius": "8px",
"offsetTop": "15px",
"backgroundColor": warnanya5,
"offsetStart": "60px",
"height": "20px",
"width": "151px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": text,
"color": warnanya6,
"size": "xs",
"margin": "xxl",
"style": "normal",
"decoration": "underline",
"offsetStart": "0px"
}
],
"position": "absolute",
"borderColor": warnanya4,
"borderWidth": "2px",
"cornerRadius": "8px",
"margin": "none",
"backgroundColor": warnanya5,
"height": "20px",
"width": "151px",
"offsetTop": "35px",
"offsetStart": "60px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": " "+ datetime.strftime(timeNow,'%H:%M:%S'),
"weight": "bold",
"color": "#2bff44",
"align": "center",
"size": "xs",
"offsetTop": "3px"
}
],
"position": "absolute",
"cornerRadius": "7px",
"offsetTop": "12px",
#"backgroundColor": "#33ffff",
"offsetStart": "70px",
"height": "20px",
"width": "75px"
},
{
"type": "box",
"layout": "vertical",
"contents": [ #weh
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xxs",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~behboaedan021",
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
"size": "xxs",
"action": {
"type": "uri",
"uri": "Https://smule.com/rs__family",
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
"size": "xxs",
"action": {
"type": "uri",
"uri": "line://nv/cameraRoll/multi"
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/ZHtFDts/20190427-185307.png", #chathttps://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xxs",
"action": {
"type": "uri",
"uri": "line://nv/chat",
},
"flex": 0
}
],
"position": "absolute",
"offsetTop": "5px",
"offsetStart": "-100px",
"height": "200px",
"width": "25px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"weight": "bold",
"color": warnanya6,
"align": "center",
"size": "xs",
"offsetTop": "0px"
}
],
"position": "absolute",
"cornerRadius": "0px",
"offsetTop": "63px",
#"backgroundColor": "#ff0000",
"offsetStart": "-5px",
"height": "15px",
"width": "75px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": " 👿Like done....",
"weight": "bold",
"color": warnanya4,
"align": "center",
"size": "sm",
"offsetTop": "0px"
}
],
"position": "absolute",
"cornerRadius": "0px",
"offsetTop": "-2px",
#"backgroundColor": "#ff0000",
"offsetStart": "120px",
"height": "30px",
"width": "100px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"aspectMode": "cover",
"position": "absolute",
"size": "full"
}
],
"width": "25px",
"height": "25px",
"offsetTop": "55px",
"cornerRadius": "100px",
"position": "absolute",
"borderWidth": "1px",
"borderColor": warnanya4,
"offsetBottom": "1px",
"offsetStart": "121px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"aspectMode": "cover",
"position": "absolute",
"size": "full"
}
],
"width": "25px",
"height": "25px",
"offsetTop": "55px",
"cornerRadius": "100px",
"position": "absolute",
"borderWidth": "1px",
"borderColor": warnanya5,
"offsetBottom": "1px",
"offsetStart": "150px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"aspectMode": "cover",
"position": "absolute",
"size": "full"
}
],
"width": "25px",
"height": "25px",
"offsetTop": "55px",
"cornerRadius": "100px",
"position": "absolute",
"borderWidth": "1px",
"borderColor": warnanya6,
"offsetBottom": "1px",
"offsetStart": "180px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"aspectMode": "cover",
"position": "absolute",
"size": "full"
}
],
"width": "25px",
"height": "25px",
"offsetTop": "55px",
"cornerRadius": "100px",
"position": "absolute",
"borderWidth": "1px",
"borderColor": warnanya4,
"offsetBottom": "1px",
"offsetStart": "210px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": " "+ datetime.strftime(timeNow,'%Y-%m-%d'),
"weight": "bold",
"color": "#2bff44",
"size": "xs",
"offsetTop": "0px"
}
],
"position": "absolute",
"cornerRadius": "7px",
"offsetTop": "35px",
#"backgroundColor": "#0000ff",
"offsetStart": "70px",
"height": "15px",
"width": "85px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"cornerRadius": "15px",
"position": "relative",
"borderColor": warnanya5,
},
"styles": {
"body": {
"backgroundColor": warnanya6,
}
}
},
]
}
}
cl.postTemplate(to, data)
      # Resolve the reply target for plain text messages and group chats.
      # NOTE(review): both branches assign the same value (receiver); the
      # toType distinction is currently redundant — confirm before removing.
      if msg.contentType == 0 or msg.toType == 2:
        if msg.toType == 0:
          to = receiver
        elif msg.toType == 2:
          to = receiver
      # Sticker checker: while "sticker" mode is armed, echo the received
      # sticker's IDs (STKID/STKPKGID/STKVER) plus a sticker-shop link.
      if msg.contentType == 7:
        if wait["sticker"] == True:
          msg.contentType = 0
          cl.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\n「Link Sticker」" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
      # Contact checker: while "contact" mode is armed, reply with the shared
      # contact's mid and, when display data is attached, its profile details
      # and profile picture.
      if msg.contentType == 13:
        if wait["contact"] == True:
          msg.contentType = 0
          cl.sendMessage(msg.to,msg.contentMetadata["mid"])
          if 'displayName' in msg.contentMetadata:
            contact = cl.getContact(msg.contentMetadata["mid"])
            path = cl.getContact(msg.contentMetadata["mid"]).picturePath
            image = 'http://dl.profile.line.naver.jp'+path
            sendTextTemplate12(msg.to,"Nama : " + msg.contentMetadata["displayName"] + "\nMID : " + msg.contentMetadata["mid"] + "\nStatus Msg : " + contact.statusMessage + "\nPicture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
            cl.sendImageWithURL(msg.to, image)
      # Invite-by-contact (admin only): while "invite" mode is armed, an
      # admin shares a contact; the bot friends that mid, invites it into the
      # current group, and confirms with an @-mention message.
      if msg.contentType == 13: #or op.type == 124:
        if msg._from in admin:
          if wait["invite"] == True:
            msg.contentType = 0
            contact = cl.getContact(msg.contentMetadata["mid"])
            invite = msg.contentMetadata["mid"]
            groups = cl.getGroup(msg.to)
            pending = groups.invitee
            targets = []
            # NOTE(review): the loop variable s is unused — only the single
            # shared mid is tested/appended, so `invite` is appended once per
            # existing member. Duplicates are harmless because the invite
            # loop below breaks after the first iteration.
            for s in groups.members:
              if invite in wait["blacklist"]:
                sendTextTemplate12(msg.to, "ʟɪsᴛ ʙʟ")
                break
              else:
                targets.append(invite)
            if targets == []:
              pass
            else:
              for target in targets:
                try:
                  cl.findAndAddContactsByMid(target)
                  cl.inviteIntoGroup(msg.to,[target])
                  ryan = cl.getContact(target)
                  zx = ""
                  zxc = ""
                  zx2 = []
                  xpesan = "「 sᴜᴋsᴇs ɪɴᴠɪᴛᴇ 」\nɴᴀᴍᴀ"
                  ret_ = "ᴋᴇᴛɪᴋ ɪɴᴠɪᴛᴇ ᴏғғ ᴊɪᴋᴀ sᴜᴅᴀʜ ᴅᴏɴᴇ"
                  ry = str(ryan.displayName)
                  pesan = ''
                  pesan2 = pesan+"@x\n"
                  # Build the LINE MENTION payload: S/E are character offsets
                  # of the "@x" placeholder inside the final text, M is the
                  # mid being mentioned.
                  xlen = str(len(zxc)+len(xpesan))
                  xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
                  zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
                  zx2.append(zx)
                  zxc += pesan2
                  text = xpesan + zxc + ret_ + ""
                  cl.sendMessage(msg.to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
                  wait["invite"] = False
                  break
                except:
                  # Best-effort: any API failure just reports and disarms.
                  sendTextTemplate12(msg.to,"ᴀɴᴅᴀ ᴛᴇʀᴋᴇɴᴀ sᴛʀᴜᴋ")
                  wait["invite"] = False
                  break
      # Invite-by-contact, variant without the admin gate ("Invi" mode):
      # first checks whether the shared contact's display name already
      # matches an existing member before inviting.
      if msg.contentType == 13: # or op.type == 124:
        if wait["Invi"] == True:
          _name = msg.contentMetadata["displayName"]
          invite = msg.contentMetadata["mid"]
          groups = cl.getGroup(msg.to)
          pending = groups.invitee
          targets = []
          for s in groups.members:
            if _name in s.displayName:
              cl.sendMessage(msg.to,"-> " + _name + " was here")
              wait["Invi"] = False
              break
            else:
              targets.append(invite)
          if targets == []:
            pass
          else:
            for target in targets:
              cl.findAndAddContactsByMid(target)
              cl.inviteIntoGroup(msg.to,[target])
              sendTextTemplate12(msg.to,"ᴅᴏɴᴇ ᴊᴇᴘɪᴛ ᴊᴏᴍʙʟᴏ\n➡" + _name)
              wait["Invi"] = False
              break
      #=============MEDIA FOTOBOT=============
      # Video-profile flow, step 1 (admin only): the incoming video is saved
      # to video.mp4 and the sender is asked for a cover image.
      if msg.contentType == 2:
        if msg._from in admin:
          if msg._from in settings["ChangeVideoProfilevid"]:
            settings["ChangeVideoProfilePicture"][msg._from] = True
            del settings["ChangeVideoProfilevid"][msg._from]
            # NOTE(review): msg_id is presumably bound earlier in the handler
            cl.downloadObjectMsg(msg_id,'path','video.mp4')
            sendTextTemplate12(msg.to,"Send gambarnya...")
      # Video-profile flow, step 2: the incoming image becomes image.jpg and
      # is combined with video.mp4 into the video profile.
      if msg.contentType == 1:
        if msg._from in admin:
          if msg._from in settings["ChangeVideoProfilePicture"]:
            del settings["ChangeVideoProfilePicture"][msg._from]
            cl.downloadObjectMsg(msg_id,'path','image.jpg')
            cl.nadyacantikimut('video.mp4','image.jpg')
            sendTextTemplate12(msg.to,"ᴠɪᴅᴇᴏ ᴘʀᴏғɪʟᴇ ᴅᴏɴᴇ")
      # "Addimg": store the received image under the previously chosen name
      # and persist the name->path mapping to image.json.
      if msg.contentType == 1:
        if msg._from in admin:
          if settings["Addimage"]["status"] == True:
            path = cl.downloadObjectMsg(msg.id)
            images[settings["Addimage"]["name"]] = str(path)
            f = codecs.open("image.json","w","utf-8")
            json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
            sendTextTemplate12(msg.to, "ᴅᴏɴᴇ ɢᴀᴍʙᴀʀ {}".format(str(settings["Addimage"]["name"])))
            settings["Addimage"]["status"] = False
            settings["Addimage"]["name"] = ""
      # "Addvideo": same pattern, persisted to video.json.
      if msg.contentType == 2:
        if msg._from in admin:
          if settings["Addvideo"]["status"] == True:
            path = cl.downloadObjectMsg(msg.id)
            videos[settings["Addvideo"]["name"]] = str(path)
            f = codecs.open("video.json","w","utf-8")
            json.dump(videos, f, sort_keys=True, indent=4, ensure_ascii=False)
            sendTextTemplate12(msg.to, "Berhasil menambahkan video {}".format(str(settings["Addvideo"]["name"])))
            settings["Addvideo"]["status"] = False
            settings["Addvideo"]["name"] = ""
      # "Addsticker": store the received sticker's IDs, persisted to
      # sticker.json.
      if msg.contentType == 7:
        if msg._from in admin:
          if settings["Addsticker"]["status"] == True:
            stickers[settings["Addsticker"]["name"]] = {"STKID":msg.contentMetadata["STKID"],"STKPKGID":msg.contentMetadata["STKPKGID"]}
            f = codecs.open("sticker.json","w","utf-8")
            json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
            sendTextTemplate12(msg.to, "ᴅᴏɴᴇ sᴛɪᴄᴋᴇʀ {}".format(str(settings["Addsticker"]["name"])))
            settings["Addsticker"]["status"] = False
            settings["Addsticker"]["name"] = ""
      # "Addaudio": same pattern, persisted to audio.json.
      if msg.contentType == 3:
        if msg._from in admin:
          if settings["Addaudio"]["status"] == True:
            path = cl.downloadObjectMsg(msg.id)
            audios[settings["Addaudio"]["name"]] = str(path)
            f = codecs.open("audio.json","w","utf-8")
            json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
            sendTextTemplate12(msg.to, "Berhasil menambahkan mp3 {}".format(str(settings["Addaudio"]["name"])))
            settings["Addaudio"]["status"] = False
            settings["Addaudio"]["name"] = ""
      # Auto-read + keyword responders: mark the chat as read when enabled,
      # then answer any text that exactly matches a stored sticker / image /
      # audio / video name (case-insensitive).
      if msg.contentType == 0:
        if settings["autoRead"] == True:
          cl.sendChatChecked(msg.to, msg_id)
        if text is None:
          return
        else:
          for sticker in stickers:
            if text.lower() == sticker:
              sid = stickers[text.lower()]["STKID"]
              spkg = stickers[text.lower()]["STKPKGID"]
              cl.sendSticker(to, spkg, sid)
          for image in images:
            if text.lower() == image:
              cl.sendImage(msg.to, images[image])
          for audio in audios:
            if text.lower() == audio:
              cl.sendAudio(msg.to, audios[audio])
          for video in videos:
            if text.lower() == video:
              cl.sendVideo(msg.to, videos[video])
      # Role management via contact share. Each wait[...] flag is armed by a
      # text command elsewhere; sharing a contact while armed applies the
      # corresponding add/remove action to that contact's mid.
      if msg.contentType == 13:
        if msg._from in owner:
          # Add/remove a bot account (owner only).
          if wait["addbots"] == True:
            if msg.contentMetadata["mid"] in Bots:
              sendTextTemplate12(msg.to,"Already in bot")
              wait["addbots"] = True
            else:
              Bots.append(msg.contentMetadata["mid"])
              wait["addbots"] = True
              sendTextTemplate12(msg.to,"Succes add bot")
          if wait["dellbots"] == True:
            if msg.contentMetadata["mid"] in Bots:
              Bots.remove(msg.contentMetadata["mid"])
              sendTextTemplate12(msg.to,"Succes delete bot")
            else:
              # NOTE(review): re-arming the already-True flag here is a
              # no-op; the mode stays armed after a miss by design(?).
              wait["dellbots"] = True
              sendTextTemplate12(msg.to,"Nothing in bot")
        #ADD STAFF
        if msg._from in admin:
          if wait["addstaff"] == True:
            if msg.contentMetadata["mid"] in staff:
              sendTextTemplate12(msg.to,"ᴡᴇs ᴊᴀᴅɪ sᴛᴀғғ")
              wait["addstaff"] = True
            else:
              staff.append(msg.contentMetadata["mid"])
              wait["addstaff"] = True
              sendTextTemplate12(msg.to,"ᴅᴏɴᴇ ᴀᴅᴅsᴛᴀғғ")
          if wait["dellstaff"] == True:
            if msg.contentMetadata["mid"] in staff:
              staff.remove(msg.contentMetadata["mid"])
              sendTextTemplate12(msg.to,"✅sᴛᴀғғ ᴅɪʜᴀᴘᴜs")
              wait["dellstaff"] = True
            else:
              wait["dellstaff"] = True
              sendTextTemplate12(msg.to,"❎bukan staff")
        #ADD ADMIN
        if msg._from in admin:
          if wait["addadmin"] == True:
            if msg.contentMetadata["mid"] in admin:
              sendTextTemplate12(msg.to,"✅sudah jadi admin")
              wait["addadmin"] = True
            else:
              admin.append(msg.contentMetadata["mid"])
              wait["addadmin"] = True
              sendTextTemplate12(msg.to,"ᴅᴏɴᴇ ᴀᴅᴅᴀᴅᴍɪɴ")
          if wait["delladmin"] == True:
            if msg.contentMetadata["mid"] in admin:
              admin.remove(msg.contentMetadata["mid"])
              sendTextTemplate12(msg.to,"✅ᴀᴅᴍɪɴ ᴅɪʜᴀᴘᴜs")
            else:
              wait["delladmin"] = True
              sendTextTemplate12(msg.to,"itu bukan admin")
        #ADD BLACKLIST
        # Blacklist store #1: wait["blacklist"] is a dict keyed by mid.
        if msg._from in admin:
          if wait["wblacklist"] == True:
            if msg.contentMetadata["mid"] in wait["blacklist"]:
              sendTextTemplate12(msg.to,"❎Contact itu sudah ada di blacklist")
              wait["wblacklist"] = True
            else:
              wait["blacklist"][msg.contentMetadata["mid"]] = True
              wait["wblacklist"] = True
              sendTextTemplate12(msg.to,"✅Berhasil menambahkan ke blacklist user")
          if wait["dblacklist"] == True:
            if msg.contentMetadata["mid"] in wait["blacklist"]:
              del wait["blacklist"][msg.contentMetadata["mid"]]
              sendTextTemplate12(msg.to,"✅Berhasil menghapus dari blacklist user")
            else:
              wait["dblacklist"] = True
              sendTextTemplate12(msg.to,"❎Contact itu tidak ada di blacklist")
        #TALKBAN
        # Talk-ban list: armed by Talkwblacklist / Talkdblacklist; mids are
        # stored as keys of wait["Talkblacklist"].
        if msg._from in admin:
          if wait["Talkwblacklist"] == True:
            if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
              sendTextTemplate12(msg.to,"✅Contact itu sudah ada di Talkban")
              wait["Talkwblacklist"] = True
            else:
              wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
              wait["Talkwblacklist"] = True
              sendTextTemplate12(msg.to,"✅Berhasil menambahkan ke Talkban user")
          if wait["Talkdblacklist"] == True:
            if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
              del wait["Talkblacklist"][msg.contentMetadata["mid"]]
              sendTextTemplate12(msg.to,"✅Berhasil menghapus dari Talkban user")
            else:
              wait["Talkdblacklist"] = True
              sendTextTemplate12(msg.to,"❎Contact itu tidak ada di Talkban")
        #WITHLIST
        # NOTE(review): a SECOND handler for the same wblacklist/dblacklist
        # flags — not admin-gated, uses ban["blacklist"] (a list) and
        # sendFooter, and resets the flags to False, while the handler above
        # uses wait["blacklist"] (a dict) and leaves them True. Both fire on
        # the same contact share; looks like two generations of the same
        # feature — confirm which store the rest of the bot actually reads.
        if wait["wblacklist"] == True:
          if msg.contentMetadata["mid"] in ban["blacklist"]:
            sendFooter(to,"「 Blacklist 」\nContact Already In Blacklist -_-")
            wait["wblacklist"] = False
          else:
            ban["blacklist"].append(msg.contentMetadata["mid"])
            sendFooter(to,"「 Blacklist 」\nSuccess Add Contact To Blacklist ^_^")
            wait["wblacklist"] = False
        if wait["dblacklist"] == True:
          if msg.contentMetadata["mid"] in ban["blacklist"]:
            ban["blacklist"].remove(msg.contentMetadata["mid"])
            sendFooter(to,"「 Blacklist 」\nSuccess Delete Contact From Blacklist ^_^")
            wait["dblacklist"] = False
          else:
            wait["dblacklist"] = False
            sendFooter(to,"「 Blacklist 」\nContact Not In Blacklist -_-")
        # Whitelist add, stored in settings["whitelist"] (a list).
        if wait["wwhitelist"] == True:
          if msg.contentMetadata["mid"] in settings["whitelist"]:
            sendFooter(to,"「 Whitelist 」\nContact Already In Whitelist -_-")
            wait["wwhitelist"] = False
          else:
            settings["whitelist"].append(msg.contentMetadata["mid"])
            sendFooter(to,"「 Whitelist 」\nSuccess Add Contact To Whitelist ^_^")
            wait["wwhitelist"] = False
      #UPDATE FOTO
      # "Addimage" (owner only): fetch the raw message image from LINE OBS
      # with the client's auth headers and save it as dataPhotos/<Img>.jpg.
      if msg.contentType == 1:
        if msg._from in owner:
          if Setmain["Addimage"] == True:
            msgid = msg.id
            fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
            headers = cl.Talk.Headers
            r = requests.get(fotoo, headers=headers, stream=True)
            if r.status_code == 200:
              path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
              with open(path, 'wb') as fp:
                shutil.copyfileobj(r.raw, fp)
              sendTextTemplate12(msg.to, "Succes add picture")
              Setmain["Img"] = {}
              Setmain["Addimage"] = False
      # "cvp" step 2: combine the bot's current profile picture with the
      # received video into a video profile.
      if msg.contentType == 2:
        if settings["changevp"] == True:
          contact = cl.getProfile()
          path = cl.downloadFileURL("https://obs.line-scdn.net/{}".format(contact.pictureStatus))
          path1 = cl.downloadObjectMsg(msg_id)
          settings["changevp"] = False
          changeVideoAndPictureProfile(path, path1)
          sendTextTemplate12(to, "ᴅᴏɴᴇ vɪᴅᴇᴏ ᴘʀᴏғɪʟᴇ")
      # Group-picture update (owner/admin/staff): the next video received
      # while armed becomes the group picture.
      if msg.contentType == 2:
        if msg._from in owner or msg._from in admin or msg._from in staff:
          if settings["groupPicture"] == True:
            path = cl.downloadObjectMsg(msg_id)
            settings["groupPicture"] = False
            cl.updateGroupPicture(msg.to, path)
            sendTextTemplate12(msg.to, "ᴅᴏɴᴇ ᴘɪᴄᴛ ɢʀᴜᴘ")
      # "RAfoto" flow (admin only): update the bot's own profile picture
      # from the received image.
      if msg.contentType == 1:
        if msg._from in admin:
          if mid in Setmain["RAfoto"]:
            path = cl.downloadObjectMsg(msg_id)
            del Setmain["RAfoto"][mid]
            cl.updateProfilePicture(path)
            sendTextTemplate906(msg.to,"ғᴏᴛᴏ ʙᴇʀʜᴀsɪʟ")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path5 = cl.downloadObjectMsg(msg_id)
settings["changePicture"] = False
cl.updateProfilePicture(path)
cl.sendMessage(msg.to, "Sukses..")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
if cmd == "help":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
sendTextTemplate906(msg.to, str(helpMessage))
if cmd == "bot on":
if msg._from in admin:
wait["selfbot"] = True
sendTextTemplate12(msg.to, "ʙᴏᴛ ᴡᴇs ᴏɴ")
elif cmd == "bot off":
if msg._from in admin:
wait["selfbot"] = False
sendTextTemplate12(msg.to, "ʙᴏᴛ ᴡᴇs ᴍᴏᴅᴀʀ")
elif cmd == "cvp":
if wait["selfbot"] == True:
if msg._from in admin:
settings["changevp"] = True
sendTextTemplate12(to, "sʜᴀʀᴇ ᴠɪᴅᴇᴏɴʏᴀ")
elif cmd == "help1":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
helpMessage1 = helpcreator()
sendTextTemplate23(msg.to, str(helpMessage1))
elif cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage2 = helpsetting()
sendTextTemplate23(msg.to, str(helpMessage2))
elif cmd == "help3":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage3 = media()
sendTextTemplate23(msg.to, str(helpMessage3))
elif cmd == "help4":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage4 = helpgroup()
sendTextTemplate23(msg.to, str(helpMessage4))
elif cmd == "help5":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage5 = helpadmin()
sendTextTemplate23(msg.to, str(helpMessage5))
elif cmd == "help6":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage6 = helpghost()
sendTextTemplate23(msg.to, str(helpMessage6))
elif cmd == "help wl":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage7 = helpwl()
sendTextTemplate906(msg.to, str(helpMessage7))
elif cmd == "help bl":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage8 = helpbl()
sendTextTemplate906(msg.to, str(helpMessage8))
elif cmd == "menu":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Me\n✒ Cvp\n✒ Setting\n✒ Runtime\n✒ Speed-Sp\n✒ Tag\n✒ Bye\n✒ Lvall\n✒ Friendlist\n✒ Gruplist\n✒ Open [qr]\n✒ Close [qr]", #1
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Set tag: [text]\n✒ Set tag2: [text]\n✒ Rtag: [Nogc]\n✒ Jepit\n✒ Block\n✒ Addme @\n✒ Mybot\n✒ Listpending\n✒ Blockcontact\n✒ Lkstblock\n✒ Listmid\n✒ Addasis", #2
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Broadcast: [text]\n✒ Ceksider\n✒ Cekleave\n✒ Cekpesan\n✒ Cekrespon\n✒ Cekrespon2\n✒ Set sider:\n✒ Set pesan:\n✒ Set respon:\n✒ Set respon2\n✒ Set welcome:\n✒ Set leave:", #3
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Like on/off\n✒ On/Off [sider]\n✒ Stag: jumlah\n✒ Stag @\n✒ Call: jumlah\n✒ Call\n✒ Scallto\n✒ Post on/off\n✒ Sticker on/off\n✒ Invite on/off\n✒ Unsend on/off\n✒ Respon on/off ", #4
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Respon2 on/off\n✒ Autoadd on/off\n✒ Autoleave on/off\n✒ Autoblock on/off\n✒ Jointicket on/off\n✒ Addmp3\n✒ Addaudio\n✒ Addimg\n✒ Dellsticker\n✒ Dellaudio\n✒ Dellmp3\n✒ Dellvideo ", #5
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Dellimg\n✒ Liststicker\n✒ Listimage\n✒ Listvideo\n✒ Listaudio\n✒ Listmp3\n✒ Lihat [no]\n✒ Cctv metro\n✒ Smule id\n✒ Joox text\n✒ Mp4 text\n✒ Mp3 text ", #6
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Youtube text\n✒ Yutube text\n✒ Getid @\n✒ Getmid @\n✒ Getbio @\n✒ Getinfo @\n✒ Getprofile @\n✒ Getpicture @\n✒ Info @\n✒ Kepo @\n✒ Ppvideo @\n✒ Kontak @ ", #7
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Contact: mid\n✒ Gname text\n✒ Mymid\n✒ Mybio\n✒ Myfoto\n✒ Myname\n✒ Myprofile\n✒ Mypicture\n✒ Mycover\n✒ Updategrup\n✒ Gruppict\n✒ Infogrup [no] ", #8
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Infomem [no]\n✒ Staf @\n✒ Stafdell @\n✒ Admin @\n✒ Admindell @\n✒ Reboot\n✒ Ban @ ", #9
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Bl\n✒ Ban:on @\n✒ Unban:on @\n✒ Unban @\n✒ Banlist\n✒ Cb\n✒ Refresh\n✒ Menu js\n✒ Menu respon\n✒ Menu sticker", #10
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd == "menu js":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Kiss @\n✒ Gkick @\n✒ Pelakor [virus]\n✒ Jilat [kickall\n &virus]\n✒ Rusak [kickall] ", #10
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd == "menu respon":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Respon on/off \n✒ Respon2 on/off \n✒ Respon3 on/off\n✒ Respon4 on/off\n✒ Respon5 on/off\n✒ ==========\n✒ ==========\n✒ St on/off\n✒ Notif on/off ", #10
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd == "menu sticker":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Sedih\n✒ Hajar\n✒ Bobok\n✒ Asem\n✒ Assalamualaikum\n✒ Sip\n✒ Nyimak\n✒ Sebel\n✒ Capek\n✒ Mmuach\n✒ Peluk\n✒ Kangen\n✒ Thanks\n✒ Ok\n✒ Cium", #10
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd == ".menu sticker":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:5",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "265px",
"offsetStart": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "270px",
"offsetStart": "-14px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n✒ Asemm\n✒ Adem\n✒ Muach\n✒ ngintip\n haha\n✒ Wkwk\n✒ Amin\n✒ Kabur\n✒ Siap\n✒ Maaf\n✒ Walaikumsalam\n✒ Absen", #10
"size": "xxs",
"weight": "bold",
"color": "#00ff00",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#C0C0C0",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#000000"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd.startswith("menu "):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
ky = key["MENTIONEES"][0]["M"]
m = cl.getContact(ky)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ᴍᴇ\n◯ ᴠᴘ\n◯ sᴇᴛᴛɪɴɢ\n◯ ʀᴜɴᴛɪᴍᴇ\n◯ sᴘᴇᴇᴅ\n◯ sᴘ\n◯ sᴀɴᴛᴇᴛ ᴍᴀɴᴛᴀɴ\n◯ ʙʏᴇᴍᴇ\n◯ ʀᴇᴊᴇᴄᴛ\n◯ ғʀɪᴇɴᴅʟɪsᴛ", #1
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ʙᴏᴛᴀᴅᴅ\n◯ ʙᴏᴛᴅᴇʟʟ\n◯ sᴛᴀғғ\n◯ sᴛᴀғᴅᴇʟʟ\n◯ ᴀᴅᴍɪɴᴅᴇʟʟ\n◯ ᴀᴅᴍɪɴ\n◯ ʀᴇʙᴏᴏᴛ\n◯ ʙᴀɴ\n◯ ʙʟᴄ\n◯ ʙᴀɴ:", #2
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ɢᴍɪᴅ\n◯ ɢᴇᴛ ɪᴅ\n◯ ɢᴇᴛᴍɪᴅ\n◯ ɢᴇᴛʙɪᴏ\n◯ ɢᴇᴛɪɴғᴏ\n◯ ɢᴇᴛᴘʀᴏғɪʟᴇ\n◯ ɢᴇᴛᴘɪᴄᴛᴜʀᴇ\n◯ ɪɴғᴏ\n◯ ᴋᴇᴘᴏ\n◯ ᴘᴘᴠɪᴅᴇᴏ", #3
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44"
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ᴄᴇᴋ sɪᴅᴇʀ\n◯ ᴄᴇᴋ ʟᴇᴀᴠᴇ\n◯ ᴄᴇᴋ ᴘᴇsᴀɴ\n◯ ᴄᴇᴋ ʀᴇsᴘᴏɴ\n◯ ᴄᴇᴋ ʀᴇsᴘᴏɴ²\n◯ sᴇᴛ sɪᴅᴇʀ:\n◯ sᴇᴛ ᴘᴇsᴀɴ:\n◯ sᴇᴛ ʀᴇsᴘᴏɴ:\n◯ sᴇᴛ ʀᴇsᴘᴏɴ²:\n◯ sᴇᴛ ᴡᴇʟᴄᴏᴍᴇ:", #4
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ Addsticker\n◯ Addmp3\n◯ Addaudio\n◯ Addimg\n◯ Dellsticker\n◯ Dellaudio\n◯ Dellmp3\n◯ Dellvideo\n◯ Dellimg\n◯ Liststicker", #5
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ᴀᴊsɴᴀᴍᴇ:\n◯ ᴀᴊsғᴏᴛᴏ\n◯ ᴀᴊs ᴄᴀɴᴄᴇʟ\n◯ ᴀᴊs ᴋɪᴄᴋᴀʟ\n◯ ᴀᴊs ᴀʙsᴇɴ\n◯ ᴘᴀs ʙᴀɴᴅ\n◯ ᴘᴀs ʙᴀɴᴅ\n◯ ᴄᴀɴᴄᴇʟᴀʟʟ\n◯ ᴄʀᴏᴛ\n◯ ɢᴋɪᴄᴋ", #6
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ᴋᴏɴᴛᴀᴋ\n◯ ᴄᴏɴᴛᴀᴄᴛ:\n◯ ɢɴᴀᴍᴇ\n◯ ᴍʏᴍɪᴅ\n◯ ᴍʏʙɪᴏ\n◯ ᴍʏғᴏᴛᴏ\n◯ ᴍʏɴᴀᴍᴇ\n◯ ᴍʏᴘʀᴏғɪʟᴇ\n◯ ᴍʏᴘɪᴄᴛᴜʀᴇ\n◯ ᴍʏᴄᴏᴠᴇʀ", #7
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ sᴇᴛ ʟᴇᴀᴠᴇ:\n◯ ʟɪᴋᴇ\n◯ ᴘᴏsᴛ\n◯ sᴛɪᴄᴋᴇʀ\n◯ ɪɴᴠɪᴛᴇ\n◯ ᴜɴsᴇɴᴅ\n◯ ʀᴇsᴘᴏɴ\n◯ ʀᴇsᴘᴏɴ²\n◯ ᴀᴜᴛᴏᴀᴅᴅ\n◯ ᴡᴇʟᴄᴏᴍᴇ", #8
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ Listimage\n◯ Listvideo\n◯ Listaudio\n◯ Listmp3\n◯ Lihat\n◯ Cctv metro\n◯ Ocmp4\n◯ Joox\n◯ mp4\n◯ mp3", #9
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(m.pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"size": "xs",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "25px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "ᴍᴇɴᴜ ʜᴇʟᴘ",
"size": "xs",
"color": "#2bff44",
"weight": "bold",
"style": "normal",
"align": "center"
}
],
"position": "absolute",
"width": "105px",
"height": "15px",
"offsetTop": "5px",
"offsetStart": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": ".",
"color": "#2bff44",
"size": "xxs"
}
],
"position": "absolute",
"width": "125px",
"height": "1px",
"backgroundColor": "#5eff7e",
"offsetTop": "200px",
"offsetStart": "15px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xs",
"style": "normal",
"weight": "bold",
"align": "center",
"color": "#2bff44",
}
],
"position": "absolute",
"width": "148px",
"height": "25px",
"offsetTop": "205px",
"offsetStart": "2px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "\n◯ ᴋɪᴄᴋ\n◯ sᴛᴀʏ\n◯ ᴊs ɪɴ-ᴏᴜᴛ\n◯ ɢʟɪsᴛᴊs\n◯ ᴋ1-ɪɴᴠɪᴛ\n◯ ᴀᴅᴅᴀsɪs\n◯ ʙʀᴏᴀᴅᴄᴀsᴛ:\n◯ ɢʀᴜᴘᴘɪᴄᴛ\n◯ ɪɴғᴏɢʀᴏᴜᴘ ɴᴏ\n◯ ɪɴғᴏᴍᴇᴍ ɴᴏ", #10
"size": "xxs",
"weight": "bold",
"color": "#2bff44",
"style": "normal",
"wrap": True,
"offsetTop": "0px",
"offsetStart": "0px"
}
],
"position": "absolute",
"offsetTop": "28px",
"offsetStart": "15px"
}
],
"paddingAll": "0px",
"borderWidth": "3px",
"borderColor": "#5eff7e",
"cornerRadius": "15px"
},
"styles": {
"body": {
"backgroundColor": "#5eff7e"
}
}
}
]
}
}
cl.postTemplate(to, data)
elif cmd.startswith("rname "):
if msg._from in admin:
sep = text.split(" ")
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
cl.renameContact(ls,sep[1])
cl.sendReplyMention(msg_id, to, "Succes change @! display name to {}".format(sep[1]), [ls])
elif cmd == "sett":
if wait["selfbot"] == True:
if msg._from in admin:
contact = cl.getProfile()
mids = [contact.mid]
cover = cl.getProfileCoverURL(sender)
listTimeLiking = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = ""
if settings["checkPost"] == True: md+="║║😘 Post : ✅\n"
else: md+="║║😈 Post : ❌\n"
if wait["likeOn"] == True: md+="║║😘 Like : ✅\n"
else: md+="║║😈 Like ❌\n"
if wait["contact"] == True: md+="║║😘 Contact : ✅\n"
else: md+="║║😈 Contact : ❌\n"
if wait["Mentionkick"] == True: md+="║║😘 Notag : ✅\n"
else: md+="║║😈 Notag : ❌\n"
if wait["detectMention"] == True: md+="║║😘 Respontag : ✅\n"
else: md+="║║😈 Respontag : ❌\n"
if wait["detectMention2"] == True: md+="║║😘 Respontag2 : ✅\n"
else: md+="║║😈 Respontag2 : ❌\n"
if wait["Unsend"] == True: md+="║║😘 Unsend : ✅\n"
else: md+="║║😈 Unsend : ❌\n"
if wait["autoAdd"] == True: md+="║║😘 Autoadd : ✅\n"
else: md+="║║😈 Autoadd : ❌\n"
if wait["autoLeave"] == True: md+="║║😘 Autoleave : ✅\n"
else: md+="║║😈 Autoleave : ❌\n"
if wait["autoJoin"] == True: md+="║║😘 Autojoin : ✅\n"
else: md+="║║😈 Autojoin : ❌\n"
if wait["sticker"] == True: md+="║║😘 Sticker : ✅\n"
else: md+="║║😈 Sticker ❌\n"
if settings["autoJoinTicket"] == True: md+="║║😘 Jointicket : ✅\n"
else: md+="║║😈 Jointicket : ❌\n"
if wait["autoReject"] == True: md+="║║😘 Autoreject : ✅\n"
else: md+="║║😈 Autoreject : ❌\n"
if wait["autoBlock"] == True: md+="║║😘 Autoblock : ✅\n"
else: md+="║║😈 Autoblock : ❌\n"
if settings["welcome"] == True: md+="║║😘 Welcome : ✅\n"
else: md+="║║😈 Welcome : ❌\n"
sendTextTemplate906(msg.to, "╔════════════════\n"+ md +"╚════════════════")
elif text.lower() == "mid" or text.lower() == "mid":
data = {
"type": "text",
"text": "{}".format(msg._from),
"sentBy": {
"label": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg"}}
cl.postTemplate(to, data)
elif text.lower() == "suek" or text.lower() == "sue":
data = {
"type": "text",
"text": "ᴋᴀʟᴏ ɢᴀᴋ sᴜᴇᴋ ᴍᴀɴᴀ ʙɪsᴀ ᴘɪᴘɪs ᴋᴋ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "dul" or text.lower() == "dudul":
data = {
"type": "text",
"text": "ᴅᴜᴅᴜʟ ɪᴛᴜ ᴍᴀɴɪs ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 ??𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "sem" or text.lower() == "asem":
data = {
"type": "text",
"text": "ᴋᴀsɪʜ ɢᴜʟᴀ ᴅᴜᴀ ᴋɪʟᴏ ʙɪᴀʀ ᴍᴀɴɪs ᴋᴋ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "pagi" or text.lower() == "esok":
data = {
"type": "text",
"text": "ᴊᴀɴɢᴀɴ ʟᴜᴘᴀ sɪᴋᴀᴛ ɢɪɢɪ ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "siang" or text.lower() == "terang":
data = {
"type": "text",
"text": "ᴜᴅᴀʜ sɪᴀɴɢ ʏᴀ ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "sore" or text.lower() == "petang":
data = {
"type": "text",
"text": "ɴᴀʜ ᴡᴀᴋᴛᴜɴʏᴀ ɴɪᴋᴜɴɢ ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "flex" or text.lower() == "flx":
data = {
"type": "text",
"text": "\n Url flex in bots\n \n \n line://app/1602687308-GXq4Vvk9\n",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "malam" or text.lower() == "mlm":
data = {
"type": "text",
"text": "malam kk udah waktunya bobok😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "njir" or text.lower() == "anjir":
data = {
"type": "text",
"text": "ᴅɪᴍᴀɴᴀ ʏᴀɴɢ ʙᴀɴᴊɪʀ ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "kantuk" or text.lower() == "ngantuk":
data = {
"type": "text",
"text": "ᴍᴀɴᴅɪ ᴋᴀʟᴏ ɴɢᴀɴᴛᴜᴋ ᴋᴋ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "ange" or text.lower() == "angen":
data = {
"type": "text",
"text": "ᴊᴇᴘɪᴛɪɴ ᴅɪᴘɪɴᴛᴜ ʙɪᴀʀ sᴇᴍʙᴜʜ ᴋᴋ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "jawab" or text.lower() == "wasalam":
data = {
"type": "text",
"text": "وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "waallaikumsalam" or text.lower() == "walaikumsalam":
data = {
"type": "text",
"text": "ᴘɪɴᴛᴇʀ ᴘᴀsᴛɪ ʀᴀᴊɪɴ ᴍᴇɴᴀʙᴜɴɢ ɴɪʜ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "asalamualaikum" or text.lower() == "asalam":
data = {
"type": "text",
"text": "وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "kampret" or text.lower() == "pret":
data = {
"type": "text",
"text": "ᴋᴀʀᴘᴇᴛ ɪᴛᴜ ʙᴜᴀᴛ ᴅɪʟᴀɴᴛᴀɪ ʏᴀᴋᴀɴ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "susu" or text.lower() == "susu":
data = {
"type": "text",
"text": "sᴜsᴜ ᴋᴇɴᴛᴀʟ ᴄᴀᴘ ɴɢᴜᴛᴀɴɢ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "kopi" or text.lower() == "ngopi":
data = {
"type": "text",
"text": "ɴɢᴏᴘɪ ᴍᴜʟᴜ ᴋᴀᴘᴀɴ ɴʏᴜsᴜɴʏᴀ",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "sepi" or text.lower() == "sepi":
data = {
"type": "text",
"text": "ɪʏᴀ ᴋᴀᴋ sᴇᴘɪ ᴘᴀᴋᴇᴛᴀɴ ᴘᴀᴅᴀ ʟɪᴍɪᴛ😂",
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "line://nv/profilePopup/mid=u4de05f388b68f9910a3fe75124bc7ab2"}}
cl.postTemplate(to, data)
elif text.lower() == "sedih":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://3.bp.blogspot.com/-OfIz4mSIumw/WbLEZw7l6nI/AAAAAAARd6Y/Dxzos1SA_5MU32bXFTKToLDndM7YpV7WACLcBGAs/s1600/AW529310_04.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="hajar":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/y0wP3fJ/tai-line.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="bobok":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/52002761/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="kiss":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/13386301/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="asem":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/72847952/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="assalamualaikum":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://www.linkpicture.com/q/unnamed-1_12.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="sip":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/4976950/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="nyimak":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/72847962/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="sebel":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/15417576/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="capek":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/64774429/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="kopi":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/64774422/ANDROID/sticker.png",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="mmuach" or text.lower() =="emuach" or text.lower() =="emmuach":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/27533208/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="peluk":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/20943951/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="kangen":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/27533210/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="thanks" or text.lower() =="makasi" or text.lower() =="terimakasih":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/27533215/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="ok" or text.lower() =="oke" or text.lower() =="okay":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/27533213/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="cium":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/52002737/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="asemm":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://www.linkpicture.com/q/06e3ef23edf41d0fc0f95d8cf30f9aac.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="dudul" or text.lower() =="pekok":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://www.linkpicture.com/q/1e92d8921a24d912c16a9f6af8acc534.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="adem":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://www.linkpicture.com/q/unnamed_30.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="muach":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://stickershop.line-scdn.net/stickershop/v1/sticker/27533209/IOS/[email protected]",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="ngintip":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://www.linkpicture.com/q/209563.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "https://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="haha":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/xHDXBrd/AW316783-23.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="wkwk":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.pinimg.com/originals/9e/bb/f7/9ebbf7a320a06fb9a254b2f521bbd4ec.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="amin":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.pinimg.com/originals/c5/1d/da/c51ddaae928617f00f962eb96fd4af33.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="kabur":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/FsTqdpd/fac502b083ab70051050890b99bb6e73.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="siap":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/TKZ2KVD/AW316783-09.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="maaf":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/LJXgPb2/AW316783-21.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="walaikumsalam":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/cgXn5dL/AW1575362-01.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif text.lower() =="absen":
if wait["jumbosticker"] == True:
url = "https://game.linefriends.com/jbp-lcs-ranking/lcs/sendMessage"
to = msg.to
data = {
"type": "template",
"altText": "{} sent a sticker".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": [
{
"imageUrl": "https://i.ibb.co/Rytk8fV/AW316783-08.gif",
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
]
}
}
cl.postTemplate(to, data)
elif ("Gname " in msg.text):
if msg._from in admin:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gname ","")
cl.updateGroup(X)
elif "Gruppict" in msg.text:
if msg._from in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Getprofile " in msg.text:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
profile = cl.getContact(mention['M'])
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+profile.pictureStatus)
except Exception as e:
pass
elif "Getinfo " in msg.text:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
sendTextTemplate12(msg.to,"Nama:\n" + contact.displayName)
sendTextTemplate12(msg.to,"Bio:\n" + contact.statusMessage)
cl.sendImageWithURL(msg.to,image)
except:
pass
elif cmd == 'listblock':
if msg._from in admin:
blockedlist = cl.getBlockedContactIds()
kontak = cl.getContacts(blockedlist)
num=1
msgs="List Blocked"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\nTotal Blocked : %i" % len(kontak)
sendTextTemplate23(to, msgs)
elif "Getbio " in msg.text:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
try:
sendTextTemplate12(msg.to,contact.statusMessage)
except:
sendTextTemplate12(msg.to,"⟦ʙɪᴏ ᴇᴍᴘᴛʏ⟧")
elif text.lower() == 'kalender':
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = "❂➣ "+ hasil + " : " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\n\n❂➣ Jam : 🔹 " + timeNow.strftime('%H:%M:%S') + " 🔹"
sendTextTemplate23(msg.to, readTime)
elif cmd == "mybot":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage(msg.to, None, contentMetadata={'mid': Amid}, contentType=13)
elif cmd == "myname":
if msg._from in admin:
contact = cl.getContact(sender)
sendTextTemplate12(to, "[ ᴅɪsᴘʟᴀʏ ɴᴀᴍᴇ ]\n{}".format(contact.displayName))
elif cmd == "mybio":
if msg._from in admin:
contact = cl.getContact(sender)
sendTextTemplate906(to, "[ sᴛᴀᴛᴜs ʟɪɴᴇ ]\n{}".format(contact.statusMessage))
elif cmd == "Picture":
if msg._from in admin:
contact = cl.getContact(sender)
cl.sendImageWithURL(to,"http://dl.profile.line-cdn.net/{}".format(contact.pictureStatus))
elif cmd == "myvideo":
if msg._from in admin:
contact = cl.getContact(sender)
cl.sendVideoWithURL(to,"http://dl.profile.line-cdn.net/{}/vp".format(contact.pictureStatus))
elif cmd == "mycover":
if msg._from in admin:
channel = cl.getProfileCoverURL(sender)
path = str(channel)
cl.sendImageWithURL(to, path)
elif cmd.startswith("bc "):
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
sendTextTemplate12(group," " + str(pesan))
elif cmd.startswith("bc2 "):
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
sendTextTemplate12(group, " ☕ ʙʀᴏᴀᴅᴄᴀsᴛ \n\n" + str(pesan))
time.sleep(1)
sendTextTemplate12(to,"Succes bc to {} group ".format(str(len(group))))
#broadcast
elif cmd.startswith("bct: "):
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
cl.sendMessage(group,"ʙʀᴏᴀᴅᴄᴀsᴛ\n\n " + str(pesan))
time.sleep(1)
sendTextTemplate12(to,"Succes bc to {} group ".format(str(len(group))))
elif cmd.startswith("bc1 "):
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
data = {
"type": "text",
"text": " ☕ ʙʀᴏᴀᴅᴄᴀsᴛ \n\n" + str(pesan),
"sentBy": {
"label": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://phoneky.co.uk/thumbs/screensavers/down/misc/skull_fQlRnZS6.gif",
"linkUrl": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg"}}
cl.postTemplate(group, data)
time.sleep(1)
sendTextTemplate12(to,"Succes bc to {} group ".format(str(len(group))))
elif cmd.startswith("kbc: "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
contact = cl.getContact(sender)
txt = text.replace(sep[0] + " ","")
friends = cl.getAllContactIds()
for friend in friends:
cl.sendMessage(friend, wait["broad"] + "\n" + format(str(txt) + "\n {} ".format(contact.displayName)))
time.sleep(1)
sendTextTemplate12(to,"Succes bc to {} friend ".format(str(len(friends))))
elif cmd == "Profile":
if msg._from in admin:
text = "~ Profile ~"
contact = cl.getContact(sender)
cover = cl.getProfileCoverURL(sender)
result = "╔══[ Details Profile ]"
result += "\n├≽ Display Name : @!"
result += "\n├≽ Mid : {}".format(contact.mid)
result += "\n├≽ Status Message : {}".format(contact.statusMessage)
result += "\n├≽ Picture Profile : http://dl.profile.line-cdn.net/{}".format(contact.pictureStatus)
result += "\n├≽ Cover : {}".format(str(cover))
result += "\n╚══[ Finish ]"
cl.sendImageWithURL(to, "http://dl.profile.line-cdn.net/{}".format(contact.pictureStatus))
cl.sendMentionWithFooter(to, text, result, [sender])
elif cmd.startswith("block"):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
cl.blockContact(ls)
cl.generateReplyMessage(msg.id)
cl.sendReplyMessage(msg.id, to, "sᴜᴋsᴇs ʙʟᴏᴄᴋ ᴋᴏɴᴛᴀᴋ" + str(contact.displayName) + "ᴍᴀsᴜᴋ ᴅᴀғᴛᴀʀ ʙʟᴏᴄᴋʟɪsᴛ")
elif cmd.startswith("addme "):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
cl.findAndAddContactsByMid(ls)
cl.generateReplyMessage(msg.id)
cl.sendReplyMessage(msg.id, to, "ʙᴇʀʜᴀsɪʟ ᴀᴅᴅ" + str(contact.displayName) + "ᴋᴜʀɪɴᴇᴍ ᴅᴜʟᴜ ʏᴀᴄʜ")
elif "Getmid " in msg.text:
if 'MENTION' in msg.contentMetadata.keys() != None:
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
try:
cl.sendMessage(msg.to,str(mention['M']))
except Exception as e:
pass
elif "Contact: " in msg.text:
if msg._from in admin:
mmid = msg.text.replace("Contact: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg.to, None, contentMetadata={'mid': mmid}, contentType=13)
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendImageWithURL(msg.to, image)
elif cmd.startswith("kontak"):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
cl.sendContact(to,str(ls))
elif cmd.startswith("ppvideo"):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
path = "http://dl.profile.line.naver.jp/{}/vp".format(contact.pictureStatus)
cl.sendVideoWithURL(to, str(path))
elif text.lower() == "dell":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
sendTextTemplate12(msg.to,"wis resik boooss")
except:
pass
elif ("Info " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
sendTextTemplate23(msg.to, "☛ Nama : "+str(mi.displayName)+"\n☛ Mid : " +key1+"\n☛ Status Msg"+str(mi.statusMessage))
sendTextTemplate23(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
elif text.lower() == "mykey":
if wait["selfbot"] == True:
if msg._from in admin:
sendTextTemplate23(msg.to, "key Now「 " + str(wait["keyCommand"]) + " 」")
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if key in [""," ","\n",None]:
sendTextTemplate23(msg.to, "ɢᴀɢᴀʟ ɴɢᴜʙᴀʜ ᴋᴇʏ")
else:
wait["keyCommand"] = str(key).lower()
sendTextTemplate906(msg.to, "sᴜᴋsᴇs ɢᴀɴᴛɪ ᴋᴇʏ「{}」".format(str(key).lower()))
#remot tagall
elif cmd.startswith("rtag: "):
Croot = msg.text.split(":")
Pepek = msg.text.replace(Croot[0] + ":"," ")
Peler = cl.getGroupIdsJoined()
Pokeh = Peler[int(Pepek)-1]
CokAnCok = cl.getGroup(Pokeh)
OlengKiller = [contact.mid for contact in CokAnCok.members]
Celik = len(OlengKiller)//19
for Manik in range(Celik+1):
txt = u''
s=0
Bohay=[]
for Jilat in CokAnCok.members[Manik*19 : (Manik+1)*19]:
Bohay.append(Jilat.mid)
RemotOlengKiller(Pokeh, Bohay)
sendTextTemplate12(msg.to, "Done, ᴋᴜʀᴀᴡᴀ Done\ndi Group: \n " + str(CokAnCok.name))
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
wait["keyCommand"]=""
sendTextTemplate906(msg.to, "succes resset key command")
elif cmd == "/reboot":
if wait["selfbot"] == True:
if msg._from in admin:
sendTextTemplate1(msg.to, "ʀᴇsᴛᴀʀᴛ ʙᴏᴛ")
wait["restartPoint"] = msg.to
restartBot()
sendTextTemplate906(msg.to, "ᴅᴏɴᴇ ʙᴏs")
elif cmd == "uih":
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cover = cl.getProfileCoverURL(sender)
G = cl.getGroup(to)
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"backgroundColor": "#ff0000",
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 1
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "xxl",
"aspectMode": "cover",
"aspectRatio": "4:4",
"gravity": "bottom",
"action": {
"uri": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2",
"type": "uri",
}
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 2
"url": cover, #https://obs.line-scdn.net/{}".format(cl.getContact(sender).displayName),
"gravity": "bottom",
"size": "xxl",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"type": "uri",
}}],
"position": "absolute",
"cornerRadius": "8px",
"offsetTop": "5px",
"offsetStart": "5px",
"height": "110px",
"width": "110px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image", #Wall 2
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(mid).pictureStatus),
"gravity": "bottom",
"size": "xxl",
"aspectMode": "cover",
"aspectRatio": "2:2",
"offsetTop": "0px",
"action": {
"uri": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2",
"type": "uri",
}}],
"position": "absolute",
"cornerRadius": "8px",
"offsetTop": "5px",
"offsetStart": "5px",
"height": "110px",
"width": "110px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "🖐️ ɪᴢɪɴ ᴘᴀᴍɪᴛ",
"weight": "bold",
"color": "#ff0000",
"align": "center",
"size": "xxs",
"offsetTop": "3px"
}
],
"position": "absolute",
"cornerRadius": "7px",
"offsetTop": "9px",
#"backgroundColor": "#33ffff",
"offsetStart": "7px",
"height": "20px",
"width": "80px"
},
{
"type": "box",
"layout": "vertical",
"contents": [ #weh
{
"type": "image",
"url": "https://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "full",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
"size": "xl",
"action": {
"type": "uri",
"uri": "Https://smule.com/BomberBSSI",
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/cameraRoll/multi"
},
"flex": 0
},{
"type": "image",
"url": "https://i.ibb.co/ZHtFDts/20190427-185307.png", #chathttps://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/chat",
},
"flex": 0
}
],
"position": "absolute",
"offsetTop": "9px",
"offsetStart": "90px",
"height": "200px",
"width": "25px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "?? "+ datetime.strftime(timeNow,'%H:%M:%S'),
"weight": "bold",
"color": "#ff00ff",
"align": "center",
"size": "xxs",
"offsetTop": "0px"
}
],
"position": "absolute",
"cornerRadius": "7px",
"offsetTop": "87px",
#"backgroundColor": "#ff0000",
"offsetStart": "1px",
"height": "15px",
"width": "75px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "📆 "+ datetime.strftime(timeNow,'%Y-%m-%d'),
"weight": "bold",
"color": "#ff00ff",
"size": "xxs",
"offsetTop": "0px"
}
],
"position": "absolute",
"cornerRadius": "7px",
"offsetTop": "98px",
#"backgroundColor": "#0000ff",
"offsetStart": "7px",
"height": "15px",
"width": "90px"
}
],
#"backgroundColor": "#ff0000",
"paddingAll": "0px"
}
},
]
}
}
cl.postTemplate(to, data)
cl.leaveGroup(to)
elif text.lower() == "lvall":
if msg._from in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
print ("Pamit semua group")
elif text.lower() == "rjall":
if msg._from in admin:
ginvited = cl.getGroupIdsInvited()
if ginvited != [] and ginvited != None:
for gid in ginvited:
cl.rejectGroupInvitation(gid)
sendTextTemplate906(msg.to, "Succes Cancell {} Invite Grup".format(str(len(ginvited))))
else:
sendTextTemplate906(msg.to, "Nothing Invited")
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = "🔽ʙᴏᴛ ʀᴜɴ : " +waktu(eltime)
sendTextTemplate23(msg.to,bot)
elif cmd == "listpending":
if wait["selfbot"] == True:
if msg.toType == 2:
group = cl.getGroup(to)
ret_ = "╭───「 Pending List 」"
no = 0
if group.invitee is None or group.invitee == []:
return cl.sendReplyMessage(msg_id, to, "Tidak ada pendingan")
else:
for pending in group.invitee:
no += 1
ret_ += "\n├≽ {}. {}".format(str(no), str(pending.displayName))
ret_ += "\n╰───「 Total {} Pending 」".format(str(len(group.invitee)))
#cl.sendReplyMessage(msg_id, to, str(ret_))
data = {
"type": "text",
"text": "{}".format(str(ret_)),
"sentBy": {
"label": " 💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"iconUrl": "https://cdn140.picsart.com/296661791123201.gif?c256x256",
"linkUrl": "line://nv/profilePopup/mid=u00e287effe898e54347d2ee6502d2ec2"
}
}
cl.postTemplate(to, data)
elif cmd == "listmem":
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
num = 0
ret_ = "╔══[ List Member ]"
for contact in group.members:
num += 1
ret_ += "\n╠ {}. {}".format(num, contact.displayName)
ret_ += "\n╚══[ Total {} Members]".format(len(group.members))
sendTextTemplate23(to, ret_)
elif cmd == "ginfo":
if msg._from in admin:
try:
G = cl.getGroup(msg.to)
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
sendTextTemplate12(msg.to, " •⌻「Grup Info」⌻•\n\n Nama Group : {}".format(G.name)+ "\nID Group : {}".format(G.id)+ "\nPembuat : {}".format(G.creator.displayName)+ "\nWaktu Dibuat : {}".format(str(timeCreated))+ "\nJumlah Member : {}".format(str(len(G.members)))+ "\nJumlah Pending : {}".format(gPending)+ "\nGroup Qr : {}".format(gQr)+ "\nGroup Ticket : {}".format(gTicket))
sendTextTemplate12(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except Exception as e:
sendTextTemplate12(msg.to, str(e))
elif cmd.startswith("infogrup"):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(danil.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += "╔══「 Info Group 」"
ret_ += "\n┣[]► Nama Group : {}".format(G.name)
ret_ += "\n┣[]► ID Group : {}".format(G.id)
ret_ += "\n┣[]► Pembuat : {}".format(gCreator)
ret_ += "\n┣[]► Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\n┣[]► Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\n┣[]► Jumlah Pending : {}".format(gPending)
ret_ += "\n┣[]► Group Qr : {}".format(gQr)
ret_ += "\n┣[]► Group Ticket : {}".format(gTicket)
ret_ += "\n╚══「 Info Finish 」"
sendTextTemplate23(to, str(ret_))
except:
pass
elif cmd.startswith("infomem"):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = denal.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n┣[]► "+ str(no) + ". " + mem.displayName
sendTextTemplate12(to,"╔══「 Group Info 」\n┣[]► Group Name : " + str(G.name) + "\n┣══「Member List」" + ret_ + "\n╚══「Total %i Members」" % len(G.members))
except:
pass
elif cmd == "flist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getAllContactIds()
for i in gid:
G = cl.getContact(i)
a = a + 1
end = "\n"
ma += "╠[]► " + str(a) + ". " +G.displayName+ "\n"
sendTextTemplate23(msg.to,"╔══[ FRIEND LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Friends ]")
elif cmd == "addbot":
try:
cl.sendMessage(msg.to, "⏳ᴛᴜɴɢɢᴜ sᴇʟᴀᴍᴀ 5 ᴍᴇɴɪᴛ")
cl.findAndAddContactsByMid(Amid)
time.sleep(5)
cl.sendMessage(to, "✓sᴜᴄᴄᴇss")
except:
cl.sendMessage(to, "✓sᴜᴄᴄᴇss")
#NOTIF SMULE
elif "https://www.smule.com" in msg.text.lower():
if wait["smule"] == True:
nm = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
nm1 = re.findall(nm, text)
nm2 = []
for nm3 in nm1:
if nm3 not in nm2:
nm2.append(nm3)
for nm4 in nm2:
nm5 = nm4
api = BEAPI("oSqSQY5q7sk9") #isi api kamu
res = api.smulePost(nm5)
cl.sendImageWithURL(to,res["result"]["performance"]["cover_url"])
sendTextTemplate12(to,res["result"]["performance"]["title"])
if "video" in res["result"]["performance"]["type"]:
cl.sendVideoWithURL(to,res["result"]["performance"]["video_media_mp4_url"])
else:
cl.sendAudioWithURL(to,res["result"]["performance"]["media_url"])
elif "Invite " in msg.text:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(to,[target])
except:
pass
elif cmd == "gl":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "┣[]► " + str(a) + ". " +G.name+ "\n"
sendTextTemplate23(msg.to,"╔══[ GROUP LIST ]\n║\n"+ma+"║\n╚══[ Total「"+str(len(gid))+"」Groups ]")
                    # NOTE(review): DEAD CODE — an identical `elif cmd == "addbot":` branch
                    # appears earlier in this same elif chain (it sends "⏳ᴛᴜɴɢɢᴜ..." and adds
                    # only Amid), so this branch can never be reached. Kept as-is; consider
                    # merging the two branches.
                    elif cmd == "addbot":
                        try:
                            cl.sendMessage(msg.to, "⏳ᴛᴜɴɢɢᴜ sᴇʟᴀᴍᴀ 5 ᴍᴇɴɪᴛ")
                            # Adds several bot accounts as contacts, pausing 5s between each
                            # (presumably rate-limit avoidance — TODO confirm).
                            cl.findAndAddContactsByMid(Amid)
                            time.sleep(5)
                            cl.findAndAddContactsByMid(Bmid)
                            time.sleep(5)
                            cl.findAndAddContactsByMid(Zmid)
                            time.sleep(5)
                            js1.findAndAddContactsByMid(mid)
                            cl.sendMessage(to, "✓sᴜᴄᴄᴇss")
                            js1.sendMessage(to, "✓sᴜᴄᴄᴇss")
                        except:
                            # Best-effort: reports success even when adding failed.
                            cl.sendMessage(to, "✓sᴜᴄᴄᴇss")
                            js1.sendMessage(to, "✓sᴜᴄᴄᴇss")
elif cmd == "qr":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
gurl = cl.reissueGroupTicket(msg.to)
sendTextTemplate12(msg.to,"line://ti/g/" + gurl)
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.preventedJoinByTicket == True:
X.preventedJoinByTicket = False
cl.updateGroup(X)
gurl = cl.reissueGroupTicket(msg.to)
sendTextTemplate12(msg.to, "Nama : "+str(X.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
sendTextTemplate906(msg.to, "Url Closed")
elif cmd == "reject":
if wait["selfbot"] == True:
if msg._from in admin:
ginvited = cl.getGroupIdsInvited()
if ginvited != [] and ginvited != None:
for gid in ginvited:
cl.rejectGroupInvitation(gid)
sendTextTemplate906(to, "ᴛᴏᴛᴀʟ {} ɢʀᴏᴜᴘ".format(str(len(ginvited))))
else:
sendTextTemplate12(to, "ʙᴇʀsɪʜ")
elif cmd.startswith("topnews"):
if msg._from in owner or msg._from in admin or msg._from in mid:
dpk=requests.get("https://newsapi.org/v2/top-headlines?country=id&apiKey=1214d6480f6848e18e01ba6985e2008d")
data=dpk.text
data=json.loads(data)
hasil = "Top News\n\n"
hasil += "(1) " + str(data["articles"][0]["title"])
hasil += "\n Sumber : " + str(data["articles"][0]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][0]["author"])
hasil += "\n Link : " + str(data["articles"][0]["url"])
hasil += "\n\n(2) " + str(data["articles"][1]["title"])
hasil += "\n Sumber : " + str(data["articles"][1]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][1]["author"])
hasil += "\n Link : " + str(data["articles"][1]["url"])
hasil += "\n\n(3) " + str(data["articles"][2]["title"])
hasil += "\n Sumber : " + str(data["articles"][2]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][2]["author"])
hasil += "\n Link : " + str(data["articles"][2]["url"])
hasil += "\n\n(4) " + str(data["articles"][3]["title"])
hasil += "\n Sumber : " + str(data["articles"][3]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][3]["author"])
hasil += "\n Link : " + str(data["articles"][3]["url"])
hasil += "\n\n(5) " + str(data["articles"][4]["title"])
hasil += "\n Sumber : " + str(data["articles"][4]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][4]["author"])
hasil += "\n Link : " + str(data["articles"][4]["url"])
hasil += "\n\n(6) " + str(data["articles"][5]["title"])
hasil += "\n Sumber : " + str(data["articles"][5]["source"]["name"])
hasil += "\n Penulis : " + str(data["articles"][5]["author"])
hasil += "\n Link : " + str(data["articles"][5]["url"])
path = data["articles"][3]["urlToImage"]
sendTextTemplate23(msg.to, str(hasil))
cl.sendImageWithURL(msg.to, str(path))
elif cmd.startswith('like '):
if msg._from in owner or msg._from in admin or msg._from in mid:
try:
typel = [1001,1002,1003,1004,1005,1006]
key = eval(msg.contentMetadata["MENTION"])
u = key["MENTIONEES"][0]["M"]
a = cl.getContact(u).mid
s = cl.getContact(u).displayName
hasil = cl.getHomeProfile(a)
st = hasil['result']['feeds']
for i in range(len(st)):
test = st[i]
result = test['post']['postInfo']['postId']
cl.likePost(str(sender), str(result), likeType=random.choice(typel))
cl.createComment(str(sender), str(result), 'Ikut bae lah')
sendTextTemplate12(receiver, 'Done Like+Comment '+str(len(st))+' Post From' + str(s))
except Exception as e:
cl.sendMessage(receiver, str(e))
#===========BOT UPDATE============#
elif cmd == "upgrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
sendTextTemplate906(msg.to,"☛ sᴇɴᴅ ᴘɪᴄᴛᴜʀᴇ")
elif cmd == "myfoto":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["RAfoto"][mid] = True
sendTextTemplate906(msg.to,"☛ sᴇɴᴅ ᴘɪᴄᴛᴜʀᴇ")
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
sendTextTemplate12(msg.to,"Nama diganti jadi " + string + "")
elif cmd == "si":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
sendTextTemplate12(msg.to, "►sɪᴅᴇʀ ᴅɪʜɪᴅᴜᴘᴋᴀɴ►")
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
elif cmd == "off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
sendTextTemplate12(msg.to, "►sɪᴅᴇʀ ᴅɪᴍᴀᴛɪᴋᴀɴ►")
else:
sendTextTemplate12(msg.to, ":")
#=========== [ Hiburan] ============#
elif cmd.startswith("cctv metro"):
if msg._from in admin:
ret_ = "Daftar Cctv Pantura\n"
ret_ += "248 = Alternatif - Cibubur\n119 = Ancol - bandara\n238 = Asia afrika - Bandung"
ret_ += "\n276 = Asia afrika - Sudirman\n295 = Bandengan - kota\n294 = Bandengan - Selatan"
ret_ += "\n102 = Buncit raya\n272 = Bundaran - HI\n93 = Cideng barat\n289 = Cikini raya"
ret_ += "\n175 = Ciloto - Puncak\n142 = Daan mogot - Grogol\n143 = Daan mogot - Pesing"
ret_ += "\n204 = Mangga besar\n319 = Margaguna raya\n326 = Margonda raya\n309 = Mas Mansyur - Tn. Abang"
ret_ += "\n64 = Matraman\n140 = Matraman - Salemba\n284 = Metro Pdk. Indah\n191 = MT Haryono - Pancoran\n160 = Pancoran barat"
ret_ += "\n331 = Pejompongan - Slipi\n332 = Pejompongan - Sudirman\n312 = Perempatan pramuka\n171 = Permata hijau - Panjang"
ret_ += "\n223 = Pramuka - Matraman\n222 = Pramuka raya\n314 = Pramuka raya - jl. Tambak\n313 = Pramuka - Salemba raya\n130 = Puncak raya KM84"
ret_ += "\n318 = Radio dalam raya\n328 = RS Fatmawati - TB\n274 = Senayan city\n132 = Slipi - Palmerah\n133 = Slipi - Tomang"
ret_ += "\n162 = S Parman - Grogol\n324 = Sudirman - Blok M\n18 = Sudirman - Dukuh atas\n325 = Sudirman - Semanggi\n112 = Sudirman - Setiabudi"
ret_ += "\n246 = Sudirman - Thamrin\n320 = Sultan agung - Sudirman\n100 = Suryo pranoto\n220 = Tanjung duren\n301 = Tol kebon jeruk"
ret_ += "\n41 = Tomang/Simpang\n159 = Tugu Pancoran\n205 = Yos Sudarso - Cawang\n206 = Yos Sudarso - Tj. Priuk"
ret_ += "\nUntuk melihat cctv,\nKetik Lihat (Nomer)"
sendTextTemplate23(to, ret_)
elif cmd.startswith("lihat"):
if msg._from in admin:
sep = msg.text.split(" ")
cct = msg.text.replace(sep[0] + " ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
r = s.get("http://lewatmana.com/cam/{}/bundaran-hi/".format(urllib.parse.quote(cct)))
soup = BeautifulSoup(r.content, 'html5lib')
try:
ret_ = "LIPUTAN CCTV TERKINI \nDaerah "
ret_ += soup.select("[class~=cam-viewer-title]")[0].text
ret_ += "\nCctv update per 5 menit"
vid = soup.find('source')['src']
ret = "Ketik Lihat nomer cctv selanjutnya"
sendTextTemplate23(to, ret_)
cl.sendVideoWithURL(to, vid)
except:
sendTextTemplate12(to, "🚦Data cctv tidak ditemukan!")
#============Comen Tag=========
elif cmd in ('sem','cok','purel','halo','tag'):
if msg._from in admin:
try:group = cl.getGroup(to);midMembers = [contact.mid for contact in group.members]
except:group = cl.getRoom(to);midMembers = [contact.mid for contact in group.contacts]
midSelect = len(midMembers)//20
for mentionMembers in range(midSelect+1):
no = 0
ret_ = "╭━━━━━╦════════╦━━━━━╮\n│╭━━━━━━━━━━━━━━━━━━━╮\n╠❂࿇➢Daftar_member\n│╰━━━━━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━━━━━╮"
dataMid = []
if msg.toType == 2:
for dataMention in group.members[mentionMembers*20 : (mentionMembers+1)*20]:
dataMid.append(dataMention.mid)
no += 1
ret_ += "\n"+"╠ {}. @!".format(str(no))
ret_ += "\n│╰━━━━━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━━━━━╮\n╠❂࿇➢Total :{}Tersangka\n│╰━━━━━━━━━━━━━━━━━━━╯\n╰━━━━━╩════════╩━━━━━╯".format(str(len(dataMid)))
cl.sendReplyMention(msg_id, to, ret_, dataMid)
else:
for dataMention in group.contacts[mentionMembers*20 : (mentionMembers+1)*20]:
dataMid.append(dataMention.mid)
no += 1
ret_ += "\n"+"╠ {}. @!".format(str(no))
ret_ += "\n│╰━━━━━━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━━━━━╮\n╠❂࿇➢Total:{}Tersangka\n│╰━━━━━━━━━━━━━━━━━━━━╯\n╰━━━━━╩════════╩━━━━━╯".format(str(len(dataMid)))
cl.sendReplyMention(msg_id, to, ret_, dataMid)
elif cmd == "hem" or text.lower() == 'cipok':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
sendTextTemplate12(msg.to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
elif (wait["NGENTOT"] == cmd):
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//20
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*20 : (a+1)*20]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Zero \n'
sendTextTemplate12(msg.to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
elif 'set tag: ' in cmd:
if msg._from in admin:
spl = cmd.replace('set tag: ','')
if spl in [""," ","\n",None]:
sendTextTemplate12(msg.to, "Gagal mengganti Set Tagall")
else:
wait["NGENTOT"] = spl
sendTextTemplate12(msg.to, "「Set Msg」\nSet Tagall diganti jadi :\n\n「{}」".format(str(spl)))
elif (wait["JANJUK"] == cmd):
if wait["selfbot"] == True:
if msg._from in admin:
try:group = cl.getGroup(to);midMembers = [contact.mid for contact in group.members]
except:group = cl.getRoom(to);midMembers = [contact.mid for contact in group.contacts]
midSelect = len(midMembers)//20
for mentionMembers in range(midSelect+1):
no = 0
ret_ = "╭━━━━━╦════╦━━━━━╮\n│╭━━━━━━━━━━━━━━━╮\n╠❂࿇➢Daftar_member\n│╰━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━╮"
dataMid = []
if msg.toType == 2:
for dataMention in group.members[mentionMembers*20 : (mentionMembers+1)*20]:
dataMid.append(dataMention.mid)
no += 1
ret_ += "\n"+"╠ {}. @!".format(str(no))
ret_ += "\n│╰━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━╮\n╠❂࿇➢Total :{}Tersangka\n│╰━━━━━━━━━━━━━━━╯\n╰━━━━━╩════╩━━━━━╯".format(str(len(dataMid)))
cl.sendReplyMention(msg_id, to, ret_, dataMid)
else:
for dataMention in group.contacts[mentionMembers*20 : (mentionMembers+1)*20]:
dataMid.append(dataMention.mid)
no += 1
ret_ += "\n"+"╠ {}. @!".format(str(no))
ret_ += "\n│╰━━━━━━━━━━━━━━━━╯\n│╭━━━━━━━━━━━━━━━╮\n╠❂࿇➢Total:{}Tersangka\n│╰━━━━━━━━━━━━━━━━╯\n╰━━━━━╩════╩━━━━━╯".format(str(len(dataMid)))
cl.sendReplyMention(msg_id, to, ret_, dataMid)
elif 'set tag2: ' in cmd:
if msg._from in admin:
spl = cmd.replace('set tag2: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Set Tagall")
else:
wait["JANJUK"] = spl
sendTextTemplate906(msg.to, "「Set Msg」\nSet Tagall diganti jadi :\n\n「{}」".format(str(spl)))
                    elif cmd == "speed" or cmd == "sp":
                        if wait["selfbot"] == True:
                            if msg._from in admin:
                                # Measure round-trip time of one send and report it in seconds.
                                start = time.time()
                                # NOTE(review): the ping is sent to a hard-coded mid, not to the
                                # requesting chat — presumably a dedicated sink account; confirm
                                # the helper's first argument is the recipient.
                                sendTextTemplate906("u1638c6ae2cb49719c33ab35b56b884be", '.')
                                elapsed_time = time.time() - start
                                sendTextTemplate12(msg.to, "%s s" % (elapsed_time))
elif cmd.startswith("kutub"):
if msg._from in admin:
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
r = requests.get("https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=10&q={}&type=video&key=AIzaSyCxNem5XpY70Wi21g1VAWs36jLbPzjTJzc".format(str(search)))
data = r.text
a = json.loads(data)
if a["items"] != []:
ret_ = []
yt = []
for music in a["items"]:
ret_.append({
"type": "bubble",
"size": "micro",
"styles": {
"header": {
"backgroundColor": "#ffffff"
},
"body": {
"backgroundColor": "#ffffff",
"separator": True,
"separatorColor": "#000000"
},
"footer": {
"backgroundColor": "#ffffff",
"separator": True,
"separatorColor": "#000000"
}
},
"header": {
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "Youtube",
"weight": "bold",
"color": "#1C1C1C",
"size": "xxs"
}
]
},
"hero": {
"type": "image",
"url": "https://i.ytimg.com/vi/{}/maxresdefault.jpg".format(music['id']['videoId']),
"size": "full",
"aspectRatio": "20:13",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "https://www.youtube.com/watch?v=%s" % music['id']['videoId']
}
},
"body": {
"type": "box",
"spacing": "xs",
"layout": "horizontal",
"contents": [{
"type": "box",
"spacing": "none",
"flex": 1,
"layout": "vertical",
"contents": [{
"type": "image",
"url": "https://cdn2.iconfinder.com/data/icons/social-icons-circular-color/512/youtube-512.png",
"aspectMode": "cover",
"gravity": "bottom",
"size": "xxs",
"aspectRatio": "1:1",
"action": {
"type": "uri",
"uri": "https://www.youtube.com/watch?v=%s" % music['id']['videoId']
}
}]
}, {
"type": "separator",
"color": "#000000"
}, {
"type": "box",
"contents": [{
"type": "text",
"text": "Title",
"color": "#000000",
"size": "xxs",
"weight": "bold",
"flex": 1,
"gravity": "top"
}, {
"type": "text",
"text": "%s" % music['snippet']['title'],
"color": "#000000",
"size": "xxs",
"weight": "bold",
"flex": 3,
"wrap": True,
"gravity": "top"
}],
"flex": 2,
"layout": "vertical"
}]
},
"footer": {
"type": "box",
"layout": "vertical",
"contents": [{
"type": "box",
"layout": "horizontal",
"contents": [{
"type": "button",
"flex": 2,
"style": "primary",
"color": "#000000",
"height": "sm",
"action": {
"type": "uri",
"label": "Page",
"uri": "line://app/1602687308-GXq4Vvk9?type=text&text=ytmp3%20https://www.youtube.com/watch?v={}".format(str(music['id']['videoId']))
}
}, {
"flex": 3,
"type": "button",
"margin": "sm",
"style": "primary",
"color": "#000000",
"height": "sm",
"action": {
"type": "uri",
"label": "Mp3",
"uri": "line://app/1602687308-GXq4Vvk9?type=text&text=ytmp3%20https://www.youtube.com/watch?v={}".format(str(music['id']['videoId']))
}
}]
}, {
"type": "button",
"margin": "sm",
"style": "primary",
"color": "#000000",
"height": "sm",
"action": {
"type": "uri",
"label": "Mp4",
"uri": "line://app/1602687308-GXq4Vvk9?type=text&text=ytmp4%20https://www.youtube.com/watch?v={}".format(str(music['id']['videoId']))
}
}]
}
}
)
yt.append('https://www.youtube.com/watch?v=' +music['id']['videoId'])
k = len(ret_)//10
for aa in range(k+1):
data = {
"type": "flex",
"altText": "Youtube",
"contents": {
"type": "carousel",
"contents": ret_[aa*10 : (aa+1)*10]
}
}
cl.postTemplate(to, data)
elif cmd.startswith("ytmp4 "):
try:
sendTextTemplate12(to, "Waitting...")
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
dl = str(key)
vid = pafy.new(dl)
stream = vid.streams
for s in stream:
start = timeit.timeit()
vin = s.url
cl.sendVideoWithURL(to,vin)
except Exception as e:
return sendTextTemplate12(to,"YOUTUBE PLEASE UPDATE\n\n"+str(e))
elif cmd.startswith("ytmp3 "):
try:
sendTextTemplate12(to, "Waitting...")
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
vid = pafy.new(key)
audio = vid.audiostreams
for audio in audios:
audio = audio.url
cl.sendAudioWithURL(to,audio)
except Exception as e:
return sendTextTemplate12(to,"YOUTUBE PLEASE UPDATE\n\n"+str(e))
elif cmd.startswith("mp3: "):
# if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
for s in stream:
me = best.url
hasil = ""
sendTextTemplate23(msg.to, "📀ᴍᴜsɪᴋ ᴀᴜᴅɪᴏ")
cl.sendAudioWithURL(msg.to, me)
except Exception as e:
cl.sendMessage(msg.to,str(e))
elif cmd.startswith("ytlink "):
sendTextTemplate906(to, "Waiting...")
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
params = {"search_query": search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(set["userAgent"])
r = web.get("https://www.youtube.com/results", params = params)
soup = BeautifulSoup(r.content, "html5lib")
ret_ = "╭━━━━━[ Youtube link di tampilkan ]"
datas = []
for data in soup.select(".yt-lockup-title > a[title]"):
if "&lists" not in data["href"]:
datas.append(data)
for data in datas:
ret_ += "\n"+St+"[ {} ]".format(str(data["title"]))
ret_ += "\n"+St+"https://www.youtube.com{}".format(str(data["href"]))
ret_ += "\n╰━━━━━━━━[ Total {} link]━━━━━".format(len(datas))
cl.sendVideoWithURL(msg.to, me)
cl.sendAudioWithURL(msg.to, me)
cl.sendMessage(Id, to, str(ret_))
elif cmd == "news":
r=requests.get("https://newsapi.org/v2/top-headlines?country=id&apiKey=1214d6480f6848e18e01ba6985e2008d")
data = r.text
x = json.loads(data)
if x["articles"] != []:
ret_ = []
for tube in x["articles"]:
if len(ret_) >= 20:
pass
else:
ret_.append({"type":"bubble","styles":{"header":{"backgroundColor":"#002321","separator":True,"separatorColor":"#23FF00"},"body":{"backgroundColor":"#002321","separator":True,"separatorColor":"#23FF00"},"footer":{"backgroundColor":"#002321","separator":True,"separatorColor":"#23FF00"}},"hero":{"type":"image","url":tube["urlToImage"],"size":"full","aspectRatio":"20:13","aspectMode":"cover"},"body":{"type":"box","spacing":"md","layout":"vertical","contents":[{"type":"box","spacing":"none","layout":"horizontal","contents":[{"type":"button","flex":2,"style":"primary","color":"#002321","height":"sm","action":{"type":"uri","label":"SITUS","uri":"https://"+tube["source"]["name"]}},{"type":"separator","color":"#302100"},{"type":"button","flex":2,"style":"primary","color":"#002321","height":"sm","action":{"type":"uri","label":"RADAR","uri":tube["url"]}}]}]},"footer":{"type":"box","layout":"vertical","contents":[{"type":"box","layout":"vertical","contents":[{"type":"text","text":"NEWS","color":"#35FF00","size":"md","weight":"bold","align":"center"},{"type":"separator","color":"#81FF00"},{"type":"text","text":tube["title"],"color":"#35FF00","size":"xxs"}]},{"type":"button","margin":"sm","style":"primary","color":"#E51A00","height":"sm","action":{"type":"uri","label":"🅿🆁🅰🅽🅺🅱🅾🆃🆂","uri":"https://bit.ly/2xbVxlh"}}]}})
k = len(ret_)//10
for aa in range(k+1):
cl.postTemplate(to, data)(to, {"type":"flex","altText":"{} semok".format(meProfile.displayName),"contents":{"type":"carousel","contents":ret_[aa*10:(aa+1)*10]}})
elif cmd.startswith("s.youtube "):
sep = text.split(" ")
msgg = text.replace(sep[0] + " ","")
cl.sendImageURL(to,"http://api.screenshotmachine.com/?key=3ae749&dimension=1920x1080&format=jpg&url=https://www.youtube.com/results?search_query=/{}".format(msgg))
elif cmd == "tube":
imgs = "https://flamingtext.com/net-fu/proxy_form.cgi?imageoutput=true&script=chrominium-logo&text=PrankBots&doScale=true&scaleWidth=430&scaleHeight=100"
r = requests.get("https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=30&q=PrankBots&type=video&key=AIzaSyB7Zb9VteS6NsZFPY41FhGRzvMzAc3HBpM")
data = r.text
x = json.loads(data)
if x["items"] != []:
ret_ = []
for tube in x["items"]:
if len(ret_) >= 20:
pass
else:
ret_.append({"type":"bubble","styles":{"header":{"backgroundColor":"#000080"},"body":{"backgroundColor":"#000000"},"footer":{"backgroundColor":"#000000","separator":True,"separatorColor":"#FF000B"}},"header":{"type":"box","layout":"vertical","contents":[{"type":"image","url":"https://i.ibb.co/T4wVtG2/20210109-123328.jpg","size":"full","aspectRatio":"3:1"}]},"hero":{"type":"image","url":"https://i.ytimg.com/vi/{}/maxresdefault.jpg".format(tube['id']['videoId']),"size":"full","aspectRatio":"4:5","aspectMode":"cover"},"body":{"type":"box","spacing":"md","layout":"horizontal","contents":[{"type":"box","spacing":"none","layout":"vertical","contents":[{"type":"image","url":"https://raw.githubusercontent.com/prankbots/logo/master/youtube.png","aspectRatio":"1:1","flex":1,"gravity":"center"},{"type":"image","url":"https://i.ibb.co/T4wVtG2/20210109-123328.jpg","aspectRatio":"1:1","flex":1,"gravity":"top"}]},{"type":"box","contents":[{"type":"text","text":"🅹🆄🅳🆄🅻🆅🅸🅳🅴🅾","color":"#FF0031","size":"md","weight":"bold","flex":1,"gravity":"top","align":"center"},{"type":"text","text":"%s"%tube['snippet']['title'],"color":"#00FF30","size":"xs","weight":"bold","flex":3,"wrap":True,"gravity":"top"},{"type":"separator","margin":"lg","color":"#81FF00"},{"type":"text","text":"🅿🅴🅽🅲🅰🆁??🅰🅽","color":"#FF0031","size":"md","weight":"bold","flex":1,"gravity":"top","align":"center"},{"type":"image","url":imgs,"size":"full","aspectRatio":"3:1","gravity":"top"}],"flex":2,"layout":"vertical"}]},"footer":{"type":"box","layout":"vertical","contents":[{"type":"box","layout":"horizontal","contents":[{"type":"button","flex":2,"style":"primary","color":"#009705","height":"sm","action":{"type":"uri","label":"MP3","uri":"line://app/1623679774-k9nBDB6b?type=text&text=convertermp3%20https://www.youtube.com/watch?v={}".format(str(tube['id']['videoId']))}},{"flex":3,"type":"button","margin":"sm","style":"primary","color":"#0009A8","height":"sm","action":{"type":"uri","label":"VIDEO","uri":"https://www.youtube.c
om/watch?v={}".format(str(tube['id']['videoId']))}},{"flex":2,"type":"button","margin":"sm","style":"primary","color":"#009705","height":"sm","action":{"type":"uri","label":"MP4","uri":"line://app/1623679774-k9nBDB6b?type=text&text=convertermp4%20https://www.youtube.com/watch?v={}".format(str(tube['id']['videoId']))}}]},{"type":"button","margin":"sm","style":"primary","color":"#E51A00","height":"sm","action":{"type":"uri","label":"💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍","uri":"https://bit.ly/2xbVxlh"}}]}})
k = len(ret_)//10
for aa in range(k+1):
data = {
"type": "flex",
"altText": "{} mengirim kont".format(cl.getContact(sender).displayName),
"contents": {
"type": "carousel",
"contents": ret_[aa*10 : (aa+1)*10]
}
}
cl.postTemplate(to, data)
elif cmd.startswith("clone "):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
cl.cloneContactProfile(contact)
ryan = cl.getContact(contact)
zx = ""
zxc = ""
zx2 = []
xpesan = "「 Clone Profile 」\nTarget nya "
ret_ = "Berhasil clone profile target"
ry = str(ryan.displayName)
pesan = ''
pesan2 = pesan+"@x \n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':ryan.mid}
zx2.append(zx)
zxc += pesan2
text = xpesan + zxc + ret_ + ""
sendTextTemplate12(to, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except:
sendTextTemplate12(msg.to, "Gagal clone profile")
elif text.lower() == 'restore':
if msg._from in admin:
try:
clProfile.displayName = str(myProfile["displayName"])
clProfile.statusMessage = str(myProfile["statusMessage"])
clPofile.pictureStatus = str(myProfile["pictureStatus"])
cl.updateProfileAttribute(8, clProfile.pictureStatus)
cl.updateProfile(clProfile)
sendTextTemplate12(msg.to, sender, "「 Restore Profile 」\nNama ", " \nBerhasil restore profile")
except:
sendTextTemplate12(msg.to, "Gagal restore profile")
elif cmd.startswith("ytb"):
if msg._from in admin:
sep = text.split(" ")
search = text.replace(sep[0] + " ","")
r = requests.get("https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=10&q={}&type=video&key=AIzaSyCxNem5XpY70Wi21g1VAWs36jLbPzjTJzc".format(str(search)))
data = r.text
a = json.loads(data)
if a["items"] != []:
ret_ = []
yt = []
for music in a["items"]:
ret_.append({
"type": "bubble",
"size": "kilo",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ytimg.com/vi/{}/maxresdefault.jpg".format(tube['id']['videoId']),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "1:1",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "%s" % tube['snippet']['title'],
"size": "sm",
"color": "#ffffff",
"weight": "bold",
"align": "center"
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "👍",
"size": "sm",
"color": "#ffffff",
"weight": "bold",
"align": "center"
},
{
"type": "text",
"text": "💬",
"align": "center"
},
{
"type": "text",
"text": "⏱",
"align": "center"
},
{
"type": "text",
"text": "🎦",
"align": "center"
}
],
"borderWidth": "3px",
"borderColor": "#ffff00"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "deskripsi\n%s" % tube['snippet']['description'],
"size": "xs",
"wrap": True
}
],
"backgroundColor": "#ffff00",
"borderColor": "#ffff00",
"borderWidth": "2px",
"height": "80px"
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "VIDEO",
"size": "md",
"color": "#ffffff",
"weight": "bold",
"align": "center",
"action": {
"type": "uri",
"label": "action",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=converter%20https://www.youtube.com/watch?v={}".format(str(tube['id']['videoId']))
}
},
{
"type": "text",
"text": "MP3",
"size": "md",
"color": "#ffffff",
"weight": "bold",
"align": "center",
"action": {
"type": "uri",
"label": "action",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=converter%20https://www.youtube.com/watch?v={}".format(str(tube['id']['videoId']))
}
}
]
}
],
"position": "absolute",
"offsetBottom": "0px",
"offsetStart": "0px",
"offsetEnd": "0px",
"backgroundColor": "#000000aa",
"paddingAll": "20px",
"paddingTop": "18p",
"borderColor": "#ff0000",
"borderWidth": "2px",
"height": "190px"
}
],
"paddingAll": "0px",
"borderColor": "#ff0000",
"borderWidth": "2px"
}
})
k = len(ret_)//10
for aa in range(k+1):
data = {
"type": "flex",
"altText": "youtube",
"contents": {
"type": "carousel",
"contents": ret_[aa*10 : (aa+1)*10]
}
}
cl.postTemplate(to, data)
elif cmd.startswith("converter ") or cmd.startswith("yt-info "):
try:
sendTextTemplate906(to, "Waitting...")
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
vid = pafy.new(key)
stream = vid.streams
audio = vid.audiostreams
for s in stream:
start = timeit.timeit()
vin = s.url
for audio in audios:
audio = audio.url
judul = '•Judul: ' + str(vid.title)
author = '•Creator: ' + str(vid.author)
durasi = '•Durasi: ' + str(vid.duration)
suka = '•Like: ' + str(vid.likes)
dislike = '•Dislike : ' +str(vid.dislikes)
rating = '•rating: ' + str(vid.rating)
tonton = '•Ditonton : ' +str(vid.viewcount)+ 'x'
kategori = '•kategori: ' + vid.category
penerbit = '•Penerbit : ' +str(vid.username)
img = "https://i3.ytimg.com/vi/{}/maxresdefault.jpg".format(str(vid.videoid))
data = {
"type": "flex",
"altText": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"contents": {
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": img,
"size": "full",
"aspectMode": "cover",
"aspectRatio": "1:1",
"gravity": "top",
"action": {
"type": "uri",
"label": "action",
"uri": vin
}
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": judul,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px",
"text": author
},
{
"type": "text",
"text": durasi,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"text": suka,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"text": dislike,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"text": rating,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"text": kategori,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"text": tonton,
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px"
},
{
"type": "text",
"color": "#ffffff",
"align": "start",
"size": "xs",
"offsetTop": "3px",
"text": penerbit
}
],
"position": "absolute",
"cornerRadius": "20px",
"offsetTop": "5px",
"backgroundColor": "#000080aa",
"offsetStart": "5px",
"height": "150px",
"width": "250px",
"borderColor": "#ffff00",
"borderWidth": "3px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "klik to play video",
"color": "#ffffff",
"align": "start",
"size": "md",
"offsetTop": "3px",
"action": {
"type": "uri",
"label": "action",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=convertermp4%20https://www.youtube.com/watch?v={}".format(str(vid.videoid))
}
}
],
"position": "absolute",
"cornerRadius": "20px",
"offsetTop": "350px",
"backgroundColor": "#000080",
"height": "50px",
"borderColor": "#ffff00",
"borderWidth": "2px",
"offsetStart": "90px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "klik to pay audio",
"color": "#ffffff",
"align": "start",
"size": "md",
"offsetTop": "3px",
"action": {
"type": "uri",
"label": "action",
"uri": "line://app/1623679774-k9nBDB6b?type=text&text=convertermp3%20https://www.youtube.com/watch?v={}".format(str(vid.videoid))
}
}
],
"position": "absolute",
"cornerRadius": "20px",
"offsetTop": "300px",
"backgroundColor": "#000080",
"height": "50px",
"borderColor": "#ffff00",
"borderWidth": "3px",
"offsetStart": "90px"
}
],
"paddingAll": "0px",
"borderColor": "#ffff00",
"borderWidth": "2px"
}
}
}
cl.postTemplate(to, data)
except Exception as e:
return sendTextTemplate906(to,"SABAR....\n\n"+str(e))
elif cmd == "galeri":
sendTextTemplate906(to, plate["galery"])
elif cmd.startswith("convertermp4 "):
try:
sendTextTemplate906(to, "Waitting...")
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
dl = str(key)
vid = pafy.new(dl)
stream = vid.streams
for s in stream:
start = timeit.timeit()
vin = s.url
cl.sendVideoWithURL(to,vin)
except Exception as e:
return sendTextTemplate906(to,"YOUTUBE PLEASE UPDATE\n\n"+str(e))
elif cmd.startswith("convertermp3 "):
try:
sendTextTemplate906(to, "Waitting...")
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
vid = pafy.new(key)
audio = vid.audiostreams
for audio in audios:
audio = audio.url
cl.sendAudioWithURL(to,audio)
except Exception as e:
return sendTextTemplate906(to,"YOUTUBE PLEASE UPDATE\n\n"+str(e))
elif cmd.startswith("smule "):
if msg._from in admin:
proses = text.split(" ")
urutan = text.replace(proses[0] + " ","")
count = urutan.split(" ")
search = str(count[0])
r = requests.get("https://www.smule.com/"+search+"/performances/json")
data = json.loads(r.text)
if len(count) == 1:
no = 0
ret_ = "╔══[ ✯ ʟɪsᴛsᴍᴜʟᴇ ✯ ]"
for aa in data["list"]:
no += 1
ret_ += "\n╠•➣" + str(no) + ". " + str(aa["title"])
ret_ += "\n╚══[ ✯ʟɪsᴛsᴍᴜʟᴇ✯ ]"
ret_ += "\nᴋᴇᴛɪᴋ: sᴍᴜʟᴇ{}ɴᴏᴍᴏʀ".format(str(search))
sendTextTemplate23(msg.to,ret_)
elif len(count) == 2:
try:
num = int(count[1])
b = data["list"][num - 1]
smule = str(b["web_url"])
c = "\n╠•➣ᴊᴜᴅᴜʟ ʟᴀɢᴜ: "+str(b["title"])
c += "\n╠•➣ᴄʀᴇᴀᴛᴏʀ: "+str(b["owner"]["handle"])
c += "\n╠•➣ʟɪᴋᴇ: "+str(b["stats"]["total_loves"])+" like"
c += "\n╠•➣ᴄᴏᴍᴍᴇɴᴛ: "+str(b["stats"]["total_comments"])+" comment"
c += "\n╠•➣sᴛᴀᴛᴜs ᴏᴄ: "+str(b["message"])
c += "\n╠•➣ᴅɪ ᴅᴇɴɢᴀʀᴋᴀɴ: {}".format(b["stats"]["total_listens"])+" orang"
c += "\n╚══[ ✯ᴡᴀɪᴛ ᴀᴜᴅɪᴏ ᴏʀ ᴠɪᴅᴇᴏ✯ ]"
hasil = "╔══[ ✯ ᴅᴇᴛᴀɪʟsᴍᴜʟᴇ ✯ ]"+str(c)
dl = str(b["cover_url"])
data = {
"type": "flex",
"altText": "Audio Smule",
"contents": {
"styles": {
"body": {
"backgroundColor": "#0000ff" #999999"
},
"footer": {
"backgroundColor": "#0000ff" #2f2f4f" #0000" #cc9999"
}
},
"type": "bubble",
"size": "micro",
"body": {
"contents": [
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},{
"contents": [
{
"text": "⚡S̸͟͞E̸͟͞L̸͟͞F̸͟͞B̸͟͞O̸͟͞T̸͟͞ T̸͟͞E̸͟͞M̸͟͞P̸͟͞L̸͟͞A̸͟͞T̸͟͞E̸͟͞⚡",
"size": "xxs",
"align": "center",
"color": "#ffff00",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "image",
"url": "https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcQtKJ9DZZjfaSZtDWapDmdO1bVccjThrGsrLARUW0ZVu2SqHTTI",
"size": "xl",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 1
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "image",
"url": dl, #"https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
"size": "xl",
"action": {
"type": "uri",
"uri": "Https://smule.com/BomberBSSI",
},
"flex": 1
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
},
{
"contents": [{"type":"separator","color": "#33ffff"},{"contents": [{"text": "🎙️ᴊᴇᴍᴘᴏʟ: "+str(b["stats"]["total_loves"])+" like","size": "xxs","color": "#00ff00","wrap": True,"weight": "bold","type": "text"},{"text": "🎙️ɴʏɪᴍᴀᴋ: {}".format(b["stats"]["total_listens"])+" orang","size": "xxs","color": "#00ff00","wrap": True,"weight": "bold","type": "text"},{"text": "🎙️ᴠᴏᴄᴀʟ: "+str(b["owner"]["handle"]),"size": "xxs","color": "#00ff00","wrap": True,"weight": "bold","type": "text"},{"text": "🎙️"+str(b["title"]),"size": "xxs","color": "#00ff00","wrap": True,"weight": "bold","type": "text"}],"type": "box","spacing": "xs","layout": "vertical"
},{"type": "separator","color": "#33ffff"}],"type": "box","spacing": "xs","layout": "horizontal" },{"type": "separator","color": "#33ffff"},{
"contents": [
{
"type": "separator",
"color": "#33ffff"
},
{
"type": "image",
"url": "https://i.ibb.co/XWQd8rj/20190625-201419.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "https://youtube.com"
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/kSMSnWn/20190427-191235.png", #camerahttps://i.ibb.co/hVWDsp8/20190428-232907.png", #smulehttps://i.ibb.co/8YfQVtr/20190427-185626.png", #callinghttps://kepriprov.go.id/assets/img/icon/phone.png", #phone
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/camera/"
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/CntKh4x/20190525-152240.png", #smule
"size": "xl",
"action": {
"type": "uri",
"uri": "Https://smule.com/BomberBSSI",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/cameraRoll/multi"
},
"flex": 1
},
{
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/1sGhJdC/20190428-232658.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/timeline"
},
"flex": 1
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#33ffff"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
}
}
}
cl.postTemplate(to, data)
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
r = s.get("https://sing.salon/smule-downloader/?url=https://www.smule.com{}".format(urllib.parse.quote(smule)))
data = BeautifulSoup(r.content, 'html5lib')
get = data.select("a[href*=https://www.smule.com/redir?]")[0]
title = data.findAll('h2')[0].text
imag = data.select("img[src*=https://www.smule.com/redir?]")[0]
if 'Smule.m4a' in get['download']:
cl.sendAudioWithURL(msg.to, get['href'])
else:
cl.sendVideoWithURL(msg.to, get['href'])
except Exception as e:
cl.sendReplyMessage(msg.id,msg.to,"Result Error:\n"+str(e))
#===========COMEN PANGGILAN======
elif cmd.startswith("tag: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
sendTextTemplate906(msg.to,"Total Spamtag Diubah Menjadi " +strnum)
elif cmd.startswith("call: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
sendTextTemplate906(msg.to,"Total Spamcall Diubah Menjadi " +strnum)
elif cmd.startswith("stag"):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!=None:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
jmlh = int(wait["limit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage901(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
sendTextTemplate906(msg.to,"Jumlah melebihi 1000")
elif msg.text.lower().startswith("naik "):
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
jmlh = int(wait["limit"])
sendTextTemplate906(msg.to, "Succes {} Call Grup".format(str(wait["limit"])))
if jmlh <= 1000:
for x in range(jmlh):
try:
mids = [contact.mid]
cl.acquireGroupCallRoute(msg.to)
cl.inviteIntoGroupCall(msg.to,mids)
except Exception as e:
cl.sendMessage(msg.to,str(e))
else:
sendTextTemplate12(msg.to,"")
elif cmd == "cpg":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
jmlh = int(wait["limit"])
sendTextTemplate12(msg.to, "Sukses Call {} diGrup".format(str(wait["limit"])))
if jmlh <= 1000:
for x in range(jmlh):
try:
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
sendTextTemplate12(msg.to,"Jumlah melebihi batas")
elif cmd.startswith("scallto "):
dan = text.split(" ")
num = int(dan[1])
ret_ = "╭───[ Spamcall Mention ]"
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
for var in range(0,num):
group = cl.getGroup(to)
members = [ls]
cl.acquireGroupCallRoute(to)
cl.inviteIntoGroupCall(to, contactIds=members)
ret_ += "\n├ @!"
ret_ += "\n╰───[ Total {} Spam call]".format(str(dan[1]))
sendMention(to, ret_, lists)
#==========Comen Spam==={{{
elif cmd.startswith("unsend "):
if msg._from in admin:
sep = text.split(" ")
args = text.replace(sep[0] + " ","")
ttl = "「UNSEND」"
mes = int(sep[1])
M = cl.getRecentMessageV2(to, 1001)
MId = []
for ind,i in enumerate(M):
if ind == 0:
pass
else:
if i._from == cl.profile.mid:
MId.append(i.id)
if len(MId) == mes:
break
def unsMes(id):
cl.unsendMessage(id)
for i in MId:
thread1 = threading.Thread(target=unsMes, args=(i,))
thread1.daemon = True
thread1.start()
thread1.join()
cl.unsendMessage(msg.id)
#===========Protection============#
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
sendTextTemplate906(msg.to, "「Diaktifkan」\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
sendTextTemplate906(msg.to, "「Dinonaktifkan」\n" + msgs)
#===============Coment kickall============
elif ("Kiss" in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in admin:
try:
cl.kickoutFromGroup(to,[target])
except:
sendTextTemplate906(msg.to,"Sorry kaki saya struk..")
elif "Gkick " in msg.text:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.cancelGroupInvitetion(msg.to,[target])
except:
pass
elif ("Awas" in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ticket)
cl.kickoutFromGroup(msg.to, [target])
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendMessage(to, "Kakiku Struk Boss")
elif cmd == "gasken" or text.lower() == '.kuy':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
if group.invitee is None:
cl.sendMessage(op.message.to, "Kosong.....")
else:
nama = [contact.mid for contact in group.invitee]
for x in nama:
if x not in admin:
klist=[mid]
cl.cancelGroupInvitation(msg.to, [x])
cl.cancelGroupInvitation(msg.to, [x])
time.sleep(0.00001)
print (msg.to, [x])
if msg._from in admin:
gs = cl.getGroup(msg.to)
targets = []
for x in gs.members:
targets.append(x.mid)
for a in admin:
if a in targets:
try:
targets.remove(a)
except:
pass
for target in targets:
try:
klist=[mid]
cl.cancelGroupInvitation(msg.to, [x])
cl.kickoutFromGroup(msg.to,[target])
time.sleep(0.00001)
print (msg.to,[g.mid])
except:
pass
elif cmd == ".jilat":
if wait["selfbot"] == True:
cl.sendMessage(msg.to, "ngeblank ya brooo maaf broo layar hpnya aq pinjam bentar ❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S
.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.👿.👿.👿 
❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.👿.👿.👿.\n❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.
S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.👿.👿.👿.\n❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.
S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.👿.👿.👿.")
elif cmd == ".pelakor":
if wait["selfbot"] == True:
cl.sendMessage(msg.to, "nyaman nyaman nyaman nyaman nyaman nyaman ❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.
0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.👿.👿.👿 
❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.👿.👿.👿.\n❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.
S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.??.👿.👿.\n❌.👁️.★.★.★.👁️.❌.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0
.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.0.S.1.☆.👿.??.👿.")
        # "Tolak": reject all pending group invitations (not a kick-all)
if "Tolak" in msg.text:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _dn in gMembMids:
if _dn not in Bots:
random.choice(KAC).cancelGroupInvitetion(to,[_dn])
if "!ngewe" in msg.text:
if msg._from in admin:
start = time.time()
time.sleep(0.00001)
nk0 = msg.text.replace("!ngewe","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
pass
else:
for target in targets:
if target not in Bots:
try:
wait["blacklist"][target] = True
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
elif msg.text in ["Angewe"]:
if msg._from in admin:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
cl.acceptGroupInvitationByTicket(msg.to,Ticket)
group = cl.getGroup(msg.to)
targets = [contact.mid for contact in group.members]
for target in targets:
time.sleep(0.2)
if target not in Bots:
if target not in admin:
try:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
except:
pass
elif cmd == "pites" or text.lower() == 'jilat':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
if group.invitee is None:
cl.sendMessage(op.message.to, "Kosong.....")
else:
nama = [contact.mid for contact in group.invitee]
for x in nama:
if x not in Bots:
klist=[mid]
cl.cancelGroupInvitetion(to, [x])
time.sleep(0.00001)
print (msg.to, [x])
if msg._from in admin:
gs = cl.getGroup(msg.to)
targets = []
for x in gs.members:
targets.append(x.mid)
for a in admin:
if a in targets:
try:
targets.remove(a)
except:
pass
for target in targets:
try:
klist=[mid]
cl.kickoutFromGroup(to,[target])
time.sleep(0.00001)
print (msg.to,[g.mid])
except:
pass
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
cl.deleteSelfFromChat(msg.to)
elif msg.text in ["Asange"]:
if msg._from in admin:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
group = cl.getGroup(msg.to)
targets = [contact.mid for contact in group.invitee]
for target in targets:
time.sleep(0.4)
if target not in Bots:
if target not in admin:
try:
random.choice(KAC).cancelGroupInvitetion(msg.to,[target])
except:
pass
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = True
vl.updateGroup(G)
Ticket = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "❂➢ʙʏᴇ ʙʏᴇ ғᴀᴍs "+str(G.name))
cl.deleteSelfFromChat(msg.to)
#=========COMEN RESPON======#
elif msg.text in ["Jepit"]:
if msg._from in admin:
wait["Invi"] = True
sendTextTemplate906(msg.to,"sᴇɴᴅ ᴄᴏɴᴛᴀᴄᴛ")
elif "Rusak" in msg.text:
if msg._from in admin:
if msg.toType == 2:
print("ok")
_name = msg.text.replace("Rusak","")
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Tidak Ditemukan.")
else:
for target in targets:
if not target in admin and Bots:
try:
klist=[cl]
kicker=random.choice(klist)
kicker.kickoutFromGroup(to,[target])
print (msg.to,[g.mid])
except Exception as e:
break
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
wait["detectMention2"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "respon2 on" or text.lower() == 'respon2 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention2"] = True
wait["detectMention"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ2 ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "respon2 off" or text.lower() == 'respon2 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention2"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ2 ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "respon3 on" or text.lower() == 'respon3 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention3"] = True
wait["detectMention2"] = False
wait["detectMention"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ3 ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "respon3 off" or text.lower() == 'respon3 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention3"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ3 ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "respon4 on" or text.lower() == 'respon4 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention4"] = True
wait["detectMention3"] = False
wait["detectMention2"] = False
wait["detectMention"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ4 ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "respon4 off" or text.lower() == 'respon4 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention4"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ4 ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "respon5 on" or text.lower() == 'respon5 on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention5"] = True
wait["detectMention4"] = False
wait["detectMention3"] = False
wait["detectMention2"] = False
wait["detectMention"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ5 ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "respon5 off" or text.lower() == 'respon5 off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention5"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ5 ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴᴛᴀɢ ᴋɪᴄᴋ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴᴛᴀɢ ᴋɪᴄᴋ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
sendTextTemplate906(msg.to,"ʀᴇsᴘᴏɴ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
sendTextTemplate906(msg.to,"ᴀᴜᴛᴏᴊᴏɪɴ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
sendTextTemplate906(msg.to,"ᴀᴜᴛᴏᴊᴏɪɴ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "notif on" or text.lower() == 'notif on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["nCall"] = True
sendTextTemplate906(msg.to,"notifcall ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "notif off" or text.lower() == 'notif off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["nCall"] = False
sendTextTemplate906(msg.to,"notifcall ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "notifcall on" or text.lower() == 'notifcall on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["notif"] = True
sendTextTemplate906(msg.to,"notifcall ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "notifcall off" or text.lower() == 'notifcall off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["notif"] = False
sendTextTemplate906(msg.to,"notifcall ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "yt on" or text.lower() == 'yt on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["media"] = True
sendTextTemplate906(msg.to,"media ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "yt off" or text.lower() == 'yt off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["media"] = False
sendTextTemplate906(msg.to,"media ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
sendTextTemplate906(msg.to,"ʟᴇᴀᴠᴇ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
sendTextTemplate906(msg.to,"ʟᴇᴀᴠᴇ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
sendTextTemplate906(msg.to,"ᴀᴅᴅ ᴍᴏᴅᴇ ᴏn")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
sendTextTemplate906(msg.to,"ᴀᴅᴅ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
sendTextTemplate906(msg.to,"sᴛɪᴄᴋᴇʀ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
sendTextTemplate906(msg.to,"sᴛɪᴄᴋᴇʀ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "stc on" or text.lower() == 'st on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["jumbosticker"] = True
sendTextTemplate906(msg.to,"sᴛɪᴄᴋᴇʀ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "stc off" or text.lower() == 'st off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["jumbosticker"] = False
sendTextTemplate906(msg.to,"sᴛɪᴄᴋᴇʀ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = True
sendTextTemplate906(msg.to,"ᴊᴏɪɴᴛɪᴄᴋᴇᴛ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
settings["autoJoinTicket"] = False
sendTextTemplate906(msg.to,"ᴊᴏɪɴᴛɪᴄᴋᴇᴛ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "autoblock on" or text.lower() == 'autoblock on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = True
sendTextTemplate906(msg.to,"ʙʟᴏᴄᴋ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "autoblock off" or text.lower() == 'autoblock off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoBlock"] = False
sendTextTemplate906(msg.to,"ʙʟᴏᴄᴋ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "post on" or text.lower() == 'post on':
if wait["selfbot"] == True:
if msg._from in admin:
settings["checkPost"] = True
sendTextTemplate906(msg.to,"ᴀᴜᴛᴏ ᴘᴏsᴛ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "post off" or text.lower() == 'post off':
if wait["selfbot"] == True:
if msg._from in admin:
settings["checkPost"] = False
sendTextTemplate906(msg.to,"ᴀᴜᴛᴏ ᴘᴏsᴛ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "like on" or text.lower() == 'like on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["likeon"] = True
sendTextTemplate906(msg.to,"ᴘᴏsᴛ ᴍᴏᴅᴇ ᴏɴ")
elif cmd == "like off" or text.lower() == 'like off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["likeon"] = False
sendTextTemplate906(msg.to,"ᴘᴏsᴛ ᴍᴏᴅᴇ ᴏғғ")
elif cmd == "invite on" or text.lower() == 'invite on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["invite"] = True
sendTextTemplate906(msg.to, "ᴋɪʀɪᴍ ᴋᴏɴᴛᴀᴋ'ɴʏᴀ")
elif cmd == "invite off" or text.lower() == 'invite off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["invite"] = False
sendTextTemplate906(msg.to,"ɪɴᴠɪᴛᴇ ᴄᴏɴᴛᴀᴄᴛ ᴏɴ")
if cmd == "unsend on":
if msg._from in admin:
wait["Unsend"] = True
sendTextTemplate906(msg.to, "Unsend mode on")
if cmd == "unsend off":
if msg._from in admin:
wait["Unsend"] = False
sendTextTemplate906(msg.to, "Unsend mode off")
elif "autoreject " in msg.text.lower():
xpesan = msg.text.lower()
xres = xpesan.replace("autoreject ","")
if xres == "off":
wait['autoReject'] = False
sendTextTemplate906(msg.to,"❎Reject already Off")
elif xres == "on":
wait['autoReject'] = True
sendTextTemplate906(msg.to,"✅Reject already On")
elif cmd == "autoread on":
if msg._from in admin:
if settings["autoRead"] == True:
sendTextTemplate906(to, "Auto read telah aktif")
else:
settings["autoRead"] = True
sendTextTemplate906(to, "Berhasil mengaktifkan auto read")
elif cmd == "autoread off":
if msg._from in admin:
if settings["autoRead"] == False:
sendTextTemplate906(to, "Auto read telah nonaktif")
else:
settings["autoRead"] = False
sendTextTemplate906(to, "Berhasil menonaktifkan auto read")
elif cmd.startswith("setcomment: "):
if msg._from in admin:
sep = text.split(" ")
txt = text.replace(sep[0] + " ","")
try:
wait["comment"] = txt
sendTextTemplate23(to, "❂Done Mengubah Pesan\n❂CommentTL:\n❂ {}".format(txt))
except:
sendTextTemplate23(to, "❂Failed")
#==================================#
elif cmd == "refresh" or text.lower() == 'seger':
if msg._from in owner or msg._from in admin or msg._from in staff:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
sendTextTemplate12(msg.to,"Clean..")
sendTextTemplate12(msg.to,"Refresh done 💯")
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin.append(target)
sendTextTemplate906(msg.to,"✅Berhasil menambahkan admin")
except:
pass
        # media download via pafy (see the "kutub" handler below)
#==============================================================
elif "kutub" in msg.text.lower():
if wait["media"] == True:
try:
regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
links = re.findall(regex, text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for urla in n_links:
zagus = urla
link = pafy.new(zagus)
v=link.streams
for a in v:
mp3=a.url
cl.sendAudioWithURL(to,mp3)
for b in v:
mp4=b.url
cl.sendVideoWithURL(to,mp4)
except:pass
#cl.sendMessage(to, str(e))
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
sendTextTemplate906(msg.to,"✅Berhasil menambahkan staff")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
admin.remove(target)
sendTextTemplate906(msg.to,"✅Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Saints:
try:
staff.remove(target)
sendTextTemplate906(msg.to,"✅Berhasil menghapus admin")
except:
pass
#===========COMMAND BLACKLIST============#
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
sendTextTemplate906(msg.to,"✅Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
sendTextTemplate906(msg.to,"✅menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
sendTextTemplate906(msg.to,"📲Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
sendTextTemplate906(msg.to,"📲Kirim kontaknya...")
elif cmd == "wanted" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
sendTextTemplate906(msg.to,"Tak ada daftar buronan")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
sendTextTemplate23(msg.to,"Blacklist User\n\n"+ma+"\nTotal「%s」Blacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "blc" or text.lower() == 'bl':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
sendTextTemplate906(msg.to,"Janda kosong")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "cban" or text.lower() == 'bebas':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = cl.getContacts(wait["blacklist"])
mc = "「%i」Bersih" % len(ragets)
sendTextTemplate906(msg.to,"Janda bodong dibebaskan" +mc)
#==========Setting bot========
elif 'Set hapus: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set hapus: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Pesan clear")
else:
wait["dell"] = spl
sendTextTemplate906(msg.to, "「clear」\clearl diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
sendTextTemplate906(msg.to, "「Pesan Msg」\nPesan Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
sendTextTemplate906(msg.to, "「Welcome Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set ghost: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set ghost: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Ghost Msg")
else:
wait["flexghost"] = spl
sendTextTemplate906(msg.to, "「Ghost Msg」\nWelcome Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set autoleave: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set autoleave: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Autoleave Msg")
else:
wait["autoLave"] = spl
sendTextTemplate906(msg.to, "「Autoleave Msg」\nAutoleave Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set bc: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set bc: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Broadcast Msg")
else:
wait["broad"] = spl
sendTextTemplate906(msg.to, "「Broadcast Msg」\n Broadcast Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon2: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon2: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag2"] = spl
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon3: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon3: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag3"] = spl
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set respon4: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon4: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag4"] = spl
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
sendTextTemplate906(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
sendTextTemplate906(msg.to, "「Sider Msg」\nSider Msg diganti jadi :\n\n「{}」".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Pesan Msg」\nPesan Msg mu :\n\n「 " + str(wait["message"]) + " 」")
elif text.lower() == "cek welcome":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Welcome Msg」\nWelcome Msg mu :\n\n「 " + str(wait["welcome"]) + " 」")
elif text.lower() == "cek leave":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Autoleave Msg」\nAutoleave Msg mu :\n\n「 " + str(wait["autoleave"]) + " 」")
elif text.lower() == "cek respon":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg mu :\n\n「 " + str(wait["Respontag"]) + " 」")
elif text.lower() == "cek respon2":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Respon Msg」\nRespon Msg mu :\n\n「 " + str(wait["Respontag2"]) + " 」")
elif text.lower() == "cek sider":
if msg._from in admin:
sendTextTemplate906(msg.to, "「Sider Msg」\nSider Msg mu :\n\n「 " + str(wait["mention"]) + " 」")
#___________________HIBURAN____________________
elif cmd == "me2" or text.lower() == 'gue':
contact = cl.getProfile()
mids = [contact.mid]
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
status = cl.getContact(sender)
# cover = cl.getProfileCoverURL(sender)
data = {
"type": "flex",
"altText": "♻️𝖉𝖚𝖉𝖚𝖑 𝖇𝖔𝖙𝖘♻️",
"contents": {
"styles": {
"body": {
"backgroundColor": "#0000ff"
},
"footer": {
"backgroundColor": "#2f2f4f"
}
},
"type": "bubble",
"size": "micro",
"body": {
"contents": [
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "xxs"
},{
"type": "text",
"text": "sᴇʟғʙᴏᴛ",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴛᴇᴍᴘʟᴀᴛᴇ",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴠᴇʀsɪ³",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "xxs"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"text": "♻️𝖉𝖚𝖉𝖚𝖑",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#33ffff"
},
{
"text": "𝖇𝖔𝖙𝖘♻️️",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#ff0000"
},
{
"text": "ᴱᴸᴵᵀᴱ",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(msg._from).pictureStatus),
"size": "xxs",
"aspectMode": "cover",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 0
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "text",
"text": "🚹{}".format(cl.getContact(sender).displayName),
"weight": "bold",
"color": "#000080",
#"align": "center",
"size": "xxs",
"flex": 0
},{
"type": "separator",
"color": "#ff0000"
},{
"type": "text",
"text": "🕙"+ datetime.strftime(timeNow,'%H:%M:%S'+"ᴡɪʙ"),
"weight": "bold",
"color": "#000080",
#"align": "center",
"size": "xxs",
"flex": 0
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"type": "text",
"text": "sᴛᴀᴛᴜs ",
"weight": "bold",
"color": "#000080",
"align": "center",
"size": "xxs",
"flex": 0
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"text": "{}".format(status.statusMessage),
"size": "xxs",
"align": "center",
"color": "#000080",
"wrap": True,
"weight": "bold",
"type": "text"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"text": "♻️𝖉𝖚𝖉𝖚𝖑 ",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#ff0000"
},
{
"text": "𝖇𝖔𝖙𝖘♻️",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},{
"type": "separator",
"color": "#ff0000"
},
{
"text": "ᴱᴸᴵᵀᴱ",
"size": "xxs",
"color": "#000080",
"align": "center",
"wrap": True,
"weight": "bold",
"type": "text"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"type": "image",
"url": "https://i.ibb.co/T4wVtG2/20210109-123328.jpg",
"size": "xl",
"action": {
"type": "uri",
"uri": "https://youtube.com",
},
"flex": 1
},
{
"type": "image",
"url": "linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01",
},
"flex": 1
},
{
"type": "image",
"url": "chathttps://i.ibb.co/b53ztTR/20190427-191019.png", #linehttps://icon-icons.com/icons2/70/PNG/512/line_14096.png", #line
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/chat" #"http://line.me/ti/p/~greetolala999",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif", #smule
"size": "xl",
"action": {
"type": "uri",
"uri": "Https://smule.com/joker_alva",
},
"flex": 1
},
{
"type": "image",
"url": "https://i.ibb.co/Wf8bQ2Z/20190625-105354.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/cameraRoll/multi"
},
"flex": 1
},
{
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/1sGhJdC/20190428-232658.png",
"size": "xl",
"action": {
"type": "uri",
"uri": "line://nv/timeline"
},
"flex": 1
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
},{
"contents": [
{
"type": "separator",
"color": "#ff0000"
},
{
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "xxs"
},{
"type": "text",
"text": "ᴛʜᴀɴᴋᴢ ғᴏʀ",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "sᴜᴘᴏʀᴛ",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},{
"type": "text",
"text": "ᴛᴇᴀᴍ",
"weight": "bold",
"color": "#000080",
"size": "xxs",
"flex": 0
},
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "xxs"
}
],
"type": "box",
"spacing": "xs",
"layout": "horizontal"
},
{
"type": "separator",
"color": "#ff0000"
}
],
"type": "box",
"layout": "horizontal"
},
{
"type": "separator", #batas APK
"color": "#ff0000"
}
],
"type": "box",
"layout": "vertical"
}
],
"type": "box",
"spacing": "xs",
"layout": "vertical"
}
}
}
cl.postTemplate(to, data)
elif cmd == "me":
contact = cl.getProfile()
mids = [contact.mid]
warna6 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya6 = random.choice(warna6)
warna4 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya4 = random.choice(warna4)
warna5 = ("#76560B","#696969","#09616B","#8B055A","#03137F","#6A037F","#7F3403")
warnanya5 = random.choice(warna5)
status = cl.getContact(sender)
cover = cl.getProfileCoverURL(sender)
data = {
"type": "carousel",
"contents": [
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://obs.line-scdn.net/{}".format(cl.getContact(msg._from).pictureStatus),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "{}".format(cl.getContact(sender).displayName),
"size": "xxs",
"color": warnanya5,
"wrap": True,
"offsetStart": "10px"
}
],
"height": "17px",
"offsetTop": "-17px",
"offsetStart": "18px"
}
],
"position": "absolute",
"offsetStart": "2px",
"offsetEnd": "0px",
"paddingAll": "20px",
"paddingTop": "18px",
"borderColor": warnanya6,
"cornerRadius": "10px",
"width": "145px",
"height": "25px",
"offsetTop": "142px",
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "COVER",
"color": warnanya4,
"align": "center",
"size": "xxs",
"offsetTop": "3px"
}
],
"position": "absolute",
"cornerRadius": "20px",
"offsetTop": "2px",
"offsetStart": "2px",
"height": "25px",
"width": "53px",
"borderWidth": "3px",
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xxs",
"color": warnanya4,
"style": "normal",
"weight": "bold",
"offsetTop": "3px",
"offsetStart": "7px"
}
],
"position": "absolute",
"width": "103px",
"height": "27px",
"backgroundColor": warnanya6,
"offsetTop": "160px",
"offsetStart": "40px",
"borderWidth": "3px",
"borderColor": warnanya4,
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "full",
"aspectRatio": "1:1",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "45px",
"height": "45px",
"borderWidth": "3px",
"borderColor": warnanya6,
"cornerRadius": "10px",
"offsetTop": "143px",
"offsetStart": "2px"
}
],
"paddingAll": "0px",
"borderWidth": "4px",
"borderColor": warnanya4,
"cornerRadius": "10px",
"height": "200px"
}
},
{
"type": "bubble",
"size": "micro",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": str(cl.getProfileCoverURL(sender)),
"size": "full",
"aspectMode": "cover",
"aspectRatio": "2:3",
"gravity": "top"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "{}".format(cl.getContact(sender).displayName),
"size": "xxs",
"color": warnanya5,
"wrap": True,
"offsetStart": "10px"
}
],
"height": "17px",
"offsetTop": "-17px",
"offsetStart": "18px"
}
],
"position": "absolute",
"offsetStart": "2px",
"offsetEnd": "0px",
"paddingAll": "20px",
"paddingTop": "18px",
"borderColor": warnanya6,
"cornerRadius": "10px",
"width": "145px",
"height": "25px",
"offsetTop": "142px",
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "COVER",
"color": warnanya4,
"align": "center",
"size": "xxs",
"offsetTop": "3px"
}
],
"position": "absolute",
"cornerRadius": "20px",
"offsetTop": "2px",
"offsetStart": "2px",
"height": "25px",
"width": "53px",
"borderWidth": "3px",
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "💎𝒅𝒖𝒅𝒖𝒍 𝒃𝒐𝒕𝒔✍",
"size": "xxs",
"color": warnanya5,
"style": "normal",
"weight": "bold",
"offsetTop": "3px",
"offsetStart": "7px"
}
],
"position": "absolute",
"width": "103px",
"height": "27px",
"backgroundColor": warnanya6,
"offsetTop": "160px",
"offsetStart": "40px",
"borderWidth": "3px",
"borderColor": warnanya4,
"cornerRadius": "5px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://i.ibb.co/h9nLycK/1617387582638.gif",
"size": "full",
"aspectRatio": "1:1",
"aspectMode": "cover"
}
],
"position": "absolute",
"width": "45px",
"height": "45px",
"borderWidth": "3px",
"borderColor": warnanya5,
"cornerRadius": "10px",
"offsetTop": "143px",
"offsetStart": "2px"
}
],
"paddingAll": "0px",
"borderWidth": "4px",
"borderColor": warnanya6,
"cornerRadius": "10px",
"height": "200px"
}
}
]
}
cl.postFlex(to, data)
elif cmd == "mempict":
if msg._from in admin:
kontak = cl.getGroup(to)
group = kontak.members
picall = []
for ids in group:
if len(picall) >= 400:
pass
else:
picall.append({
"imageUrl": "https://os.line.naver.jp/os/p/{}".format(ids.mid),
"action": {
"type": "uri",
"uri": "http://line.me/ti/p/~waentur01"
}
}
)
k = len(picall)//10
for aa in range(k+1):
data = {
"type": "template",
"altText": "{} membagikan janda".format(cl.getProfile().displayName),
"template": {
"type": "image_carousel",
"columns": picall[aa*10 : (aa+1)*10]
}
}
cl.postTemplate(to, data)
#===========JOIN TICKET============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
#if msg._from in admin or msg._from in owner:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
sendTextTemplate12(msg.to, "Aing Lebet: %s" % str(group.name))
#===========add img============#
elif text.lower() == "cekbot":
if msg._from in admin:
try:cl.inviteIntoGroup(to, ["u1638c6ae2cb49719c33ab35b56b884be"]);has = "OK"
except:has = "NOT"
try:cl.kickoutFromGroup(to, ["u1638c6ae2cb49719c33ab35b56b884be"]);has1 = "OK"
except:has1 = "NOT"
if has == "OK":sil = "😠"
else:sil = "😥"
if has1 == "OK":sil1 = "😠"
else:sil1 = "😥"
sendTextTemplate12(to, "Kick: {} \nInvite: {}".format(sil1,sil))
#===============HIBURAN============================#
elif cmd.startswith("addmp3 "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in audios:
settings["Addaudio"]["status"] = True
settings["Addaudio"]["name"] = str(name.lower())
audios[str(name.lower())] = ""
f = codecs.open("audio.json","w","utf-8")
json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(msg.to,"Silahkan kirim mp3 nya...")
else:
sendTextTemplate12(msg.to, "Mp3 itu sudah dalam list")
elif cmd.startswith("dellmp3 "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in audios:
cl.deleteFile(audios[str(name.lower())])
del audios[str(name.lower())]
f = codecs.open("audio.json","w","utf-8")
json.dump(audios, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(msg.to, "Done hapus mp3 {}".format( str(name.lower())))
else:
sendTextTemplate12(msg.to, "Mp3 itu tidak ada dalam list")
elif cmd == "listmp3":
if msg._from in admin:
no = 0
ret_ = "╔═══❲ My Music ❳════\n"
for audio in audios:
ret_ += "┣[]◇ " + audio.title() + "\n"
ret_ += "╚═══❲ {} Record ❳════".format(str(len(audios)))
sendTextTemplate12(to, ret_)
elif cmd.startswith("addsticker "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in stickers:
settings["Addsticker"]["status"] = True
settings["Addsticker"]["name"] = str(name.lower())
stickers[str(name.lower())] = ""
f = codecs.open("Sticker.json","w","utf-8")
json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Silahkan kirim stickernya...")
else:
sendTextTemplate12(to, "Sticker itu sudah dalam list")
elif cmd.startswith("dellsticker "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in stickers:
del stickers[str(name.lower())]
f = codecs.open("sticker.json","w","utf-8")
json.dump(stickers, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Berhasil menghapus sticker {}".format( str(name.lower())))
else:
sendTextTemplate12(to, "Sticker ada di list")
elif cmd == "liststicker":
if msg._from in admin:
no = 0
ret_ = "╔═══❲ My Sticker ❳════\n"
for sticker in stickers:
ret_ += "┣[]◇ " + sticker.title() + "\n"
ret_ += "╚═══❲ {} Stickers ❳════".format(str(len(stickers)))
sendTextTemplate12(to, ret_)
elif cmd.startswith("addimg "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in images:
settings["Addimage"]["status"] = True
settings["Addimage"]["name"] = str(name.lower())
images[str(name.lower())] = ""
f = codecs.open("image.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Silahkan kirim fotonya")
else:
sendTextTemplate12(to, "Foto Udah dalam list")
elif cmd.startswith("dellimg "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in images:
cl.deleteFile(images[str(name.lower())])
del images[str(name.lower())]
f = codecs.open("image.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Berhasil menghapus {}".format( str(name.lower())))
else:
sendTextTemplate12(to, "Foto ada dalam list")
elif cmd == "listimage":
if msg._from in admin:
no = 0
ret_ = "╭───「 Daftar Image 」\n"
for audio in audios:
no += 1
ret_ += str("├≽") + " " + audio.title() + "\n"
ret_ += "╰───「 Total {} Image 」".format(str(len(audios)))
sendTextTemplate12(to, ret_)
#==============add video==========================================================================
elif cmd.startswith("addvideo"):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name not in images:
settings["Addvideo"]["status"] = True
settings["Addvideo"]["name"] = str(name.lower())
images[str(name.lower())] = ""
f = codecs.open("video.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Silahkan kirim video nya...")
else:
sendTextTemplate12(to, "video sudah ada")
elif cmd.startswith("dellvideo "):
if msg._from in admin:
sep = text.split(" ")
name = text.replace(sep[0] + " ","")
name = name.lower()
if name in images:
cl.deleteFile(images[str(name.lower())])
del images[str(name.lower())]
f = codecs.open("video.json","w","utf-8")
json.dump(images, f, sort_keys=True, indent=4, ensure_ascii=False)
sendTextTemplate12(to, "Berhasil menghapus {}".format( str(name.lower())))
else:
sendTextTemplate12(to, "video tidak ada")
elif cmd == "listvideo":
if msg._from in admin:
no = 0
ret_ = "╭───「 Daftar Video 」\n"
for audio in audios:
no += 1
ret_ += str("├≽") + " " + audio.title() + "\n"
ret_ += "╰───「 Total {} Video 」".format(str(len(audios)))
sendTextTemplate12(to, ret_)
except Exception as error:
print (error)
# Main event loop: poll LINE's long-poll endpoint forever and dispatch
# every received operation to the bot() handler.
while True:
    try:
        # Fetch up to 50 pending operations; returns None when nothing is queued.
        ops = oepoll.singleTrace(count=50)
        if ops is not None:
            for op in ops:
                bot(op)
                # Don't remove this line, if you wan't get error soon!
                # (Advances the poll revision so the same op is not re-delivered.)
                oepoll.setRevision(op.revision)
    except Exception as e:
        logError(e)
| 42.003468 | 7,706 | 0.338357 |
c2b29da1b056994d949d2253507d9f09573c9e1e
| 1,370 |
py
|
Python
|
Codigos Python/Plus_One.py
|
BrunoHarlis/Solucoes_LeetCode
|
cca9b1331cbfe7d8dc8d844a810ac651a92d8c97
|
[
"MIT"
] | null | null | null |
Codigos Python/Plus_One.py
|
BrunoHarlis/Solucoes_LeetCode
|
cca9b1331cbfe7d8dc8d844a810ac651a92d8c97
|
[
"MIT"
] | null | null | null |
Codigos Python/Plus_One.py
|
BrunoHarlis/Solucoes_LeetCode
|
cca9b1331cbfe7d8dc8d844a810ac651a92d8c97
|
[
"MIT"
] | null | null | null |
# Fonte: https://leetcode.com/problems/plus-one/
# Autor: Bruno Harlis
# Data: 10/08/2021
"""
Dada uma matriz não vazia de dígitos decimais representando um
inteiro não negativo, incremente um para o inteiro.
Os dígitos são armazenados de forma que o dígito mais significativo
esteja no topo da lista e cada elemento na matriz contenha um único dígito.
Você pode assumir que o inteiro não contém nenhum zero à esquerda,
exceto o próprio número 0.
Exemplo 1:
Entrada: dígitos = [1,2,3]
Saída: [1,2,4]
Explicação: A matriz representa o inteiro 123.
Exemplo 2:
Entrada: dígitos = [4,3,2,1]
Saída: [4,3,2,2]
Explicação: A matriz representa o inteiro 4321.
Tempo de execução : 28 ms, mais rápido do que 88,60 % das submissões.
Uso de memória : 14,2 MB, menos de 76,32 % dos envios.
"""
def plusOne(digits):
    """Increment by one the decimal number represented by *digits*.

    The list stores one digit per element, most significant first.  It is
    modified in place and the same list object is returned, e.g.
    [1, 2, 9] -> [1, 3, 0].
    """
    pos = len(digits) - 1
    while pos >= 0:
        if digits[pos] < 9:
            # No carry needed: bump this digit and we are done.
            digits[pos] += 1
            return digits
        # A 9 rolls over to 0 and the carry moves one place left.
        digits[pos] = 0
        pos -= 1
    # The carry ran past the most significant digit (all nines): prepend a 1.
    digits.insert(0, 1)
    return digits
if __name__ == '__main__':
    # Smoke tests covering the plain, zero, all-nines and carry cases.
    for sample in ([1, 2, 3], [4, 3, 2, 1], [0], [9], [9, 9], [1, 9, 9]):
        print(plusOne(sample))
| 23.220339 | 75 | 0.627007 |
cf6d947d715f3bdc7e81a44c61639cc911cd006d
| 18,512 |
py
|
Python
|
ietf/utils/management/commands/create_group_wikis.py
|
unofficial-mirror/ietfdb
|
ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81
|
[
"BSD-3-Clause"
] | null | null | null |
ietf/utils/management/commands/create_group_wikis.py
|
unofficial-mirror/ietfdb
|
ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81
|
[
"BSD-3-Clause"
] | null | null | null |
ietf/utils/management/commands/create_group_wikis.py
|
unofficial-mirror/ietfdb
|
ce54adb30dc7299c6eb4d42b9aa9d2c2929c1a81
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright The IETF Trust 2016-2019, All Rights Reserved
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import copy
import io
import pkg_resources
import six
import syslog
from trac.core import TracError
from trac.env import Environment
from trac.perm import PermissionSystem
from trac.ticket.model import Component, Milestone, Severity
from trac.util.text import unicode_unquote
from trac.wiki.model import WikiPage
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.template.loader import render_to_string
import debug # pyflakes:ignore
from ietf.group.models import Group, GroupURL, GroupFeatures
from ietf.utils.pipe import pipe
logtag = __name__.split('.')[-1]
logname = "user.log"
syslog.openlog(str(logtag), syslog.LOG_PID, syslog.LOG_USER)
class Command(BaseCommand):
help = "Create group wikis for WGs, RGs and Areas which don't have one."
def add_arguments(self, parser):
parser.add_argument('--wiki-dir-pattern', dest='wiki_dir_pattern',
default=settings.TRAC_WIKI_DIR_PATTERN,
help='A pattern with %s placeholder for group wiki path')
parser.add_argument('--svn-dir-pattern', dest='svn_dir_pattern',
default=settings.TRAC_SVN_DIR_PATTERN,
help='A pattern with %s placeholder for group svn path')
parser.add_argument('--group-list', '-g', dest='group_list', help='Limit processing to groups with the given acronyms (a comma-separated list)')
parser.add_argument('--dummy-run', '-n', default=False, action='store_true', dest='dummy_run', help='Make no changes, just show what would be done')
secretariat = Group.objects.get(acronym='secretariat')
def note(self, msg):
if self.verbosity > 1:
self.stdout.write(msg)
def log(self, msg):
syslog.syslog(msg)
self.stdout.write(msg)
self.stderr.write(msg)
# --- svn ---
def do_cmd(self, cmd, *args):
quoted_args = [ '"%s"'%a if ' ' in a else a for a in args ]
if self.dummy_run:
self.note("Would run %s %s ..." % (os.path.basename(cmd), " ".join(quoted_args)))
else:
self.note("Running %s %s ..." % (os.path.basename(cmd), " ".join(quoted_args)))
command = [ cmd, ] + list(args)
command = ' '.join(command).encode('utf-8')
code, out, err = pipe(command)
out = out.decode('utf-8')
err = err.decode('utf-8')
msg = None
if code != 0:
msg = "Error %s: %s when executing '%s'" % (code, err, " ".join(command))
self.log(msg)
return msg, out
    def svn_admin_cmd(self, *args):
        """Run `svnadmin` (settings.SVN_ADMIN_COMMAND) with *args*; see do_cmd()."""
        return self.do_cmd(settings.SVN_ADMIN_COMMAND, *args)
def create_svn(self, svn):
if self.dummy_run:
self.note(" Would create svn repository: %s" % svn)
return "Dummy run, no svn repo created"
else:
self.note(" Creating svn repository: %s" % svn)
if not os.path.exists(os.path.dirname(svn)):
msg = "Intended to create '%s', but parent directory is missing" % svn
self.log(msg)
return msg
err, out= self.svn_admin_cmd("create", svn )
if err:
self.log(err)
return err
return ""
# --- trac ---
def remove_demo_components(self, env):
for component in Component.select(env):
if component.name.startswith('component'):
component.delete()
def remove_demo_milestones(self, env):
for milestone in Milestone.select(env):
if milestone.name.startswith('milestone'):
milestone.delete()
    def symlink_to_master_assets(self, path, env):
        """Replace this environment's private htdocs directory with a symlink
        to the shared master htdocs, so every group Trac serves the same assets."""
        master_dir = settings.TRAC_MASTER_DIR
        master_htdocs = os.path.join(master_dir, "htdocs")
        group_htdocs = os.path.join(path, "htdocs")
        self.note("  Symlinking %s to %s" % (master_htdocs, group_htdocs))
        # NOTE(review): os.removedirs() also prunes empty *parent* directories
        # and fails if htdocs is non-empty; os.rmdir() looks like the intended
        # call -- confirm before changing.
        os.removedirs(group_htdocs)
        os.symlink(master_htdocs, group_htdocs)
def add_wg_draft_states(self, group, env):
for state in settings.TRAC_ISSUE_SEVERITY_ADD:
self.note(" Adding severity %s" % state)
severity = Severity(env)
severity.name = state
severity.insert()
def add_wiki_page(self, env, name, text):
page = WikiPage(env, name)
if page.time:
self.note(" ** Page %s already exists, not adding it." % name)
return
page.text = text
page.save(author="(System)", comment="Initial page import")
def add_default_wiki_pages(self, env):
dir = pkg_resources.resource_filename('trac.wiki', 'default-pages')
#WikiAdmin(env).load_pages(dir)
with env.db_transaction:
for name in os.listdir(dir):
filename = os.path.join(dir, name)
name = unicode_unquote(name.encode('utf-8'))
if os.path.isfile(filename):
self.note(" Adding page %s" % name)
with io.open(filename, encoding='utf-8') as file:
text = file.read()
self.add_wiki_page(env, name, text)
def add_custom_wiki_pages(self, group, env):
for templ in settings.TRAC_WIKI_PAGES_TEMPLATES:
_, name = os.path.split(templ)
text = render_to_string(templ, {"group": group})
self.note(" Adding page %s" % name)
self.add_wiki_page(env, name, text)
def sync_default_repository(self, group, env):
repository = env.get_repository('')
if repository:
self.note(" Indexing default repository")
repository.sync()
else:
self.log("Trac environment '%s' does not have any repository" % env)
    def create_adhoc_trac(self, name, path):
        """Create a standalone (non-group) Trac instance named *name* at *path*.

        Returns (env, msg): the Environment and "" on success, or
        (None, error-message) on failure or in dummy-run mode.
        """
        if not os.path.exists(os.path.dirname(path)):
            msg = "Intended to create '%s', but parent directory is missing" % path
            self.log(msg)
            return None, msg
        options = copy.deepcopy(settings.TRAC_ENV_OPTIONS)
        # Interpolate group field names to values in the option settings:
        remove = []
        for i in range(len(options)):
            sect, key, val = options[i]
            if key in ['repository_type', 'repository_dir', ]:
                # Ad-hoc instances have no repository: mark these options for
                # removal.  Indices are prepended so the delete loop below
                # runs from highest index to lowest and stays valid.
                remove = [i] + remove
            else:
                val = val.format(name=name)
                options[i] = sect, key, val
        for i in remove:
            del options[i]
        # Try to create the environment, remove unwanted defaults, and add
        # custom pages and settings.
        if self.dummy_run:
            self.note("Would create Trac for '%s' at %s" % (name, path))
            return None, "Dummy run, no trac created"
        else:
            try:
                self.note("Creating Trac for '%s' at %s" % (name, path))
                env = Environment(path, create=True, options=options)
                self.remove_demo_components(env)
                self.remove_demo_milestones(env)
                # Use custom assets (if any) from the master setup
                self.symlink_to_master_assets(path, env)
                # NOTE(review): add_custom_wiki_pages() renders templates with
                # {"group": group} but receives the *name* string here --
                # confirm the templates tolerate a plain string.
                self.add_custom_wiki_pages(name, env)
                self.add_default_wiki_pages(env)
                # Permissions will be handled during permission update later.
                return env, ""
            except TracError as e:
                msg = "While creating Trac instance for %s: %s" % (name, e)
                self.log(msg)
                return None, msg
    def create_group_trac(self, group):
        """Create the Trac instance for *group* at group.trac_dir.

        Returns (env, msg): the Environment and "" on success, or
        (None, error-message) on failure or in dummy-run mode.
        """
        if not os.path.exists(os.path.dirname(group.trac_dir)):
            msg = "Intended to create '%s', but parent directory is missing" % group.trac_dir
            self.log(msg)
            return None, msg
        options = copy.deepcopy(settings.TRAC_ENV_OPTIONS)
        # Interpolate group field names to values in the option settings:
        for i in range(len(options)):
            sect, key, val = options[i]
            val = val.format(**group.__dict__)
            options[i] = sect, key, val
        # Try to create the environment, remove unwanted defaults, and add
        # custom pages and settings.
        if self.dummy_run:
            self.note("Would create Trac for group '%s' at %s" % (group.acronym, group.trac_dir))
            return None, "Dummy run, no trac created"
        else:
            try:
                self.note("Creating Trac for group '%s' at %s" % (group.acronym, group.trac_dir))
                env = Environment(group.trac_dir, create=True, options=options)
                self.remove_demo_components(env)
                self.remove_demo_milestones(env)
                # Record the wiki / issue-tracker URLs on the group if absent.
                self.maybe_add_group_url(group, 'Wiki', settings.TRAC_WIKI_URL_PATTERN % group.acronym)
                self.maybe_add_group_url(group, 'Issue tracker', settings.TRAC_ISSUE_URL_PATTERN % group.acronym)
                # Use custom assets (if any) from the master setup
                self.symlink_to_master_assets(group.trac_dir, env)
                if group.features.acts_like_wg:
                    self.add_wg_draft_states(group, env)
                self.add_custom_wiki_pages(group, env)
                self.add_default_wiki_pages(env)
                self.sync_default_repository(group, env)
                # Components (i.e., drafts) will be handled during components
                # update later
                # Permissions will be handled during permission update later.
                return env, ""
            except (TracError, IOError) as e:
                msg = "While creating Trac instance for %s: %s" % (group, e)
                self.log(msg)
                return None, msg
def update_trac_permissions(self, name, group, env):
if self.dummy_run:
self.note("Would update Trac permissions for '%s' from group %s" % (name, group.acronym))
else:
self.note("Updating Trac permissions for '%s' from group %s" % (name, group.acronym))
mgr = PermissionSystem(env)
permission_list = mgr.get_all_permissions()
permission_list = [ (u,a) for (u,a) in permission_list if not u in ['anonymous', 'authenticated']]
permissions = {}
for user, action in permission_list:
if not user in permissions:
permissions[user] = []
permissions[user].append(action)
roles = ( list( group.role_set.filter(name_id__in=set(['chair', 'secr', 'ad', 'trac-admin', ]+group.features.admin_roles)))
+ list(self.secretariat.role_set.filter(name_id__in=['trac-admin', ]) ))
users = []
for role in roles:
user = role.email.address.lower()
users.append(user)
if not user in permissions:
try:
self.note(" Granting admin permission for %s" % user)
mgr.grant_permission(user, 'TRAC_ADMIN')
if not user in permissions:
permissions[user] = []
permissions[user].append('TRAC_ADMIN')
except TracError as e:
self.log("While adding admin permission for %s: %s" (user, e))
for user in permissions:
if not user in users:
if 'TRAC_ADMIN' in permissions[user]:
try:
self.note(" Revoking admin permission for %s" % user)
mgr.revoke_permission(user, 'TRAC_ADMIN')
except TracError as e:
self.log("While revoking admin permission for %s: %s" (user, e))
def update_trac_components(self, group, env):
if self.dummy_run:
self.note("Would update Trac components for group '%s'" % group.acronym)
else:
self.note("Updating Trac components for group '%s'" % group.acronym)
components = Component.select(env)
comp_names = [ c.name for c in components ]
group_docs = group.document_set.filter(states__slug='active', type_id='draft').distinct()
group_comp = []
for doc in group_docs:
if not doc.name.startswith('draft-'):
self.log("While adding components: unexpectd %s group doc name: %s" % (group.acronym, doc.name))
continue
name = doc.name[len('draft-'):]
if name.startswith('ietf-'):
name = name[len('ietf-'):]
elif name.startswith('irtf-'):
name = name[len('ietf-'):]
if name.startswith(group.acronym+'-'):
name = name[len(group.acronym+'-'):]
group_comp.append(name)
if not name in comp_names and not doc.name in comp_names:
self.note(" Group draft: %s" % doc.name)
self.note(" Adding component %s" % name)
comp = Component(env)
comp.name = name
comp.owner = "%[email protected]" % doc.name
comp.insert()
def maybe_add_group_url(self, group, name, url):
urls = [ u for u in group.groupurl_set.all() if name.lower() in u.name.lower() ]
if not urls:
self.note(" adding %s %s URL ..." % (group.acronym, name.lower()))
url = GroupURL.objects.create(group=group, name=name, url=url)
group.groupurl_set.add(url)
    def add_custom_pages(self, group, env):
        """Stub: intentionally does nothing (custom pages are handled by
        add_custom_wiki_pages()); kept for interface symmetry."""
        for template_name in settings.TRAC_WIKI_PAGES_TEMPLATES:
            pass
    def add_custom_group_states(self, group, env):
        """Stub: intentionally does nothing (severities are added by
        add_wg_draft_states()); kept for interface symmetry."""
        for state_name in settings.TRAC_ISSUE_SEVERITY_ADD:
            pass
# --------------------------------------------------------------------
def handle(self, *filenames, **options):
self.verbosity = options['verbosity']
self.errors = []
self.wiki_dir_pattern = options.get('wiki_dir_pattern', settings.TRAC_WIKI_DIR_PATTERN)
self.svn_dir_pattern = options.get('svn_dir_pattern', settings.TRAC_SVN_DIR_PATTERN)
self.group_list = options.get('group_list', None)
self.dummy_run = options.get('dummy_run', False)
self.wiki_dir_pattern = os.path.join(str(settings.BASE_DIR), str('..'), self.wiki_dir_pattern)
self.svn_dir_pattern = os.path.join(settings.BASE_DIR, '..', self.svn_dir_pattern)
if not self.group_list is None:
self.group_list = self.group_list.split('.')
if isinstance(self.verbosity, six.string_types) and self.verbosity.isdigit():
self.verbosity = int(self.verbosity)
if self.dummy_run and self.verbosity < 2:
self.verbosity = 2
if not os.path.exists(os.path.dirname(self.wiki_dir_pattern)):
raise CommandError('The Wiki base direcory specified for the wiki directories (%s) does not exist.' % os.path.dirname(self.wiki_dir_pattern))
if not os.path.exists(os.path.dirname(self.svn_dir_pattern)):
raise CommandError('The SVN base direcory specified for the SVN directories (%s) does not exist.' % os.path.dirname(self.svn_dir_pattern))
gtypes = [ f.type for f in GroupFeatures.objects.filter(create_wiki=True) ]
gfilter = Q(type__in=gtypes, state__slug__in=settings.TRAC_CREATE_GROUP_STATES)
gfilter |= Q(acronym__in=settings.TRAC_CREATE_GROUP_ACRONYMS)
groups = Group.objects.filter(gfilter).order_by('acronym')
if self.group_list:
groups = groups.filter(acronym__in=self.group_list)
for group in groups:
try:
self.note("Processing group '%s'" % group.acronym)
group.trac_dir = self.wiki_dir_pattern % group.acronym
group.svn_dir = self.svn_dir_pattern % group.acronym
if not os.path.exists(group.svn_dir):
err = self.create_svn(group.svn_dir)
if err:
self.errors.append(err)
if not os.path.exists(group.trac_dir):
trac_env, msg = self.create_group_trac(group)
if not trac_env:
self.errors.append(msg)
else:
trac_env = Environment(group.trac_dir)
if not trac_env and not self.dummy_run:
continue
self.update_trac_permissions(group.acronym, group, trac_env)
self.update_trac_components(group, trac_env)
except Exception as e:
self.errors.append(e)
self.log("While processing %s: %s" % (group.acronym, e))
raise
for acronym, name, path in settings.TRAC_CREATE_ADHOC_WIKIS:
try:
self.note("Processing wiki '%s'" % name)
if not os.path.isabs(path):
path = self.wiki_dir_pattern % path
if not os.path.exists(path):
trac_env, msg = self.create_adhoc_trac(name, path)
if not trac_env:
self.errors.append(msg)
else:
trac_env = Environment(path)
if not trac_env and not self.dummy_run:
continue
if acronym.endswith('*'):
groups = Group.objects.filter(acronym__startswith=acronym[:-1], state_id='active')
for group in groups:
self.update_trac_permissions(name, group, trac_env)
else:
group = Group.objects.get(acronym=acronym)
self.update_trac_permissions(name, group, trac_env)
except Exception as e:
self.errors.append(e)
self.log("While processing %s: %s" % (name, e))
raise
if self.errors:
raise CommandError("There were %s failures in WG Trac creation:\n %s" % (len(self.errors), "\n ".join(self.errors)))
| 44.181384 | 156 | 0.571791 |
3c0bac7deda047e72d15e15c3a85de947ea170e3
| 16,663 |
py
|
Python
|
calico/felix/test/test_dispatch.py
|
tomdee/felix
|
79d9cba3b13ad078b19a9eabb52f1cafc52a1a01
|
[
"Apache-2.0"
] | null | null | null |
calico/felix/test/test_dispatch.py
|
tomdee/felix
|
79d9cba3b13ad078b19a9eabb52f1cafc52a1a01
|
[
"Apache-2.0"
] | null | null | null |
calico/felix/test/test_dispatch.py
|
tomdee/felix
|
79d9cba3b13ad078b19a9eabb52f1cafc52a1a01
|
[
"Apache-2.0"
] | 1 |
2016-12-02T12:08:32.000Z
|
2016-12-02T12:08:32.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2014, 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_dispatch
~~~~~~~~~~~~~~~~~~~~~~~~
Tests of the actor that controls the top-level dispatch chain.
"""
import collections
from pprint import pformat
import mock
from calico.felix.test.base import BaseTestCase
from calico.felix.dispatch import (
DispatchChains, CHAIN_TO_ENDPOINT, CHAIN_FROM_ENDPOINT
)
# A mocked config object for use with interface_to_suffix: a lightweight
# namedtuple carrying just the fields the dispatch-chain code reads
# (interface prefix and metadata address/port).
Config = collections.namedtuple('Config', ['IFACE_PREFIX', 'METADATA_IP', 'METADATA_PORT'])
class TestDispatchChains(BaseTestCase):
"""
Tests for the DispatchChains actor.
"""
def setUp(self):
super(TestDispatchChains, self).setUp()
self.iptables_updater = mock.MagicMock()
self.config = Config('tap', None, 8775)
def getDispatchChain(self):
return DispatchChains(
config=self.config,
ip_version=4,
iptables_updater=self.iptables_updater
)
def assert_iptables_update(self,
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names):
# We only care about positional arguments
args = args[0]
# The DispatchChains object stores the endpoints in a set, which means
# that when it builds the list of goto rules they can be emitted in any
# order. However, the DROP rule must always appear at the end. To do
# that, first check that the updates contain the same rules in any
# order (using assertItemsEqual), and then confirm that the last rule
# is the DROP rule.
self.assertItemsEqual(args[0][CHAIN_TO_ENDPOINT], to_updates)
self.assertItemsEqual(args[0][CHAIN_FROM_ENDPOINT], from_updates)
self.assertEqual(args[0][CHAIN_TO_ENDPOINT][-1], to_updates[-1])
self.assertEqual(args[0][CHAIN_FROM_ENDPOINT][-1], from_updates[-1])
# Confirm that the dependency sets match.
self.assertEqual(args[1][CHAIN_TO_ENDPOINT], to_chain_names)
self.assertEqual(args[1][CHAIN_FROM_ENDPOINT], from_chain_names)
def test_applying_metadata(self):
"""
Tests that a snapshot with metadata works OK.
"""
self.config = Config('tap', '127.0.0.1', 8775)
d = self.getDispatchChain()
ifaces = ['tapabcdef', 'tap123456', 'tapb7d849']
d.apply_snapshot(ifaces, async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --in-interface tapabcdef --goto felix-from-abcdef',
'--append felix-FROM-ENDPOINT --in-interface tap123456 --goto felix-from-123456',
'--append felix-FROM-ENDPOINT --in-interface tapb7d849 --goto felix-from-b7d849',
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --out-interface tapabcdef --goto felix-to-abcdef',
'--append felix-TO-ENDPOINT --out-interface tap123456 --goto felix-to-123456',
'--append felix-TO-ENDPOINT --out-interface tapb7d849 --goto felix-to-b7d849',
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set(['felix-from-abcdef', 'felix-from-123456', 'felix-from-b7d849'])
to_chain_names = set(['felix-to-abcdef', 'felix-to-123456', 'felix-to-b7d849'])
self.iptables_updater.assertCalledOnce()
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args, to_updates, from_updates, to_chain_names,
from_chain_names
)
def test_tree_building(self):
d = self.getDispatchChain()
d.programmed_leaf_chains.add("felix-FROM-EP-PFX-a")
d.programmed_leaf_chains.add("felix-FROM-EP-PFX-z")
ifaces = ['tapa1', 'tapa2', 'tapa3',
'tapb1', 'tapb2',
'tapc']
to_delete, deps, updates, new_leaf_chains = d._calculate_update(ifaces)
self.assertEqual(to_delete, set(["felix-FROM-EP-PFX-z"]))
self.assertEqual(deps, {
'felix-TO-ENDPOINT': set(
['felix-FROM-EP-PFX-a', 'felix-FROM-EP-PFX-b', 'felix-to-c']),
'felix-FROM-ENDPOINT': set(
['felix-TO-EP-PFX-a', 'felix-TO-EP-PFX-b', 'felix-from-c']),
'felix-TO-EP-PFX-a': set(['felix-to-a1', 'felix-to-a2', 'felix-to-a3']),
'felix-TO-EP-PFX-b': set(['felix-to-b1', 'felix-to-b2']),
'felix-FROM-EP-PFX-a': set(['felix-from-a1',
'felix-from-a2',
'felix-from-a3']),
'felix-FROM-EP-PFX-b': set(['felix-from-b2', 'felix-from-b1']),
})
for chain_name, chain_updates in updates.items():
chain_updates[:] = sorted(chain_updates[:-1]) + chain_updates[-1:]
print "Updates:", pformat(updates)
self.assertEqual(updates, {
'felix-TO-ENDPOINT': [
# If there are multiple endpoints with a prefix, we get a
# prefix match.
'--append felix-TO-ENDPOINT --out-interface tapa+ --goto felix-TO-EP-PFX-a',
'--append felix-TO-ENDPOINT --out-interface tapb+ --goto felix-TO-EP-PFX-b',
# If there's only one, we don't.
'--append felix-TO-ENDPOINT --out-interface tapc --goto felix-to-c',
'--append felix-TO-ENDPOINT --jump DROP'],
'felix-FROM-ENDPOINT': [
'--append felix-FROM-ENDPOINT --in-interface tapa+ --goto felix-FROM-EP-PFX-a',
'--append felix-FROM-ENDPOINT --in-interface tapb+ --goto felix-FROM-EP-PFX-b',
'--append felix-FROM-ENDPOINT --in-interface tapc --goto felix-from-c',
'--append felix-FROM-ENDPOINT --jump DROP'],
'felix-FROM-EP-PFX-a': [
# Per-prefix chain has one entry per endpoint.
'--append felix-FROM-EP-PFX-a --in-interface tapa1 --goto felix-from-a1',
'--append felix-FROM-EP-PFX-a --in-interface tapa2 --goto felix-from-a2',
'--append felix-FROM-EP-PFX-a --in-interface tapa3 --goto felix-from-a3',
# And a trailing drop.
'--append felix-FROM-EP-PFX-a --jump DROP'],
'felix-FROM-EP-PFX-b': [
'--append felix-FROM-EP-PFX-b --in-interface tapb1 --goto felix-from-b1',
'--append felix-FROM-EP-PFX-b --in-interface tapb2 --goto felix-from-b2',
'--append felix-FROM-EP-PFX-b --jump DROP'],
'felix-TO-EP-PFX-a': [
'--append felix-TO-EP-PFX-a --out-interface tapa1 --goto felix-to-a1',
'--append felix-TO-EP-PFX-a --out-interface tapa2 --goto felix-to-a2',
'--append felix-TO-EP-PFX-a --out-interface tapa3 --goto felix-to-a3',
'--append felix-TO-EP-PFX-a --jump DROP'],
'felix-TO-EP-PFX-b': [
'--append felix-TO-EP-PFX-b --out-interface tapb1 --goto felix-to-b1',
'--append felix-TO-EP-PFX-b --out-interface tapb2 --goto felix-to-b2',
'--append felix-TO-EP-PFX-b --jump DROP']
})
def test_applying_snapshot_clean(self):
"""
Tests that a snapshot can be applied to a previously unused actor.
"""
d = self.getDispatchChain()
ifaces = ['tapabcdef', 'tap123456', 'tapb7d849']
d.apply_snapshot(ifaces, async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --in-interface tapabcdef --goto felix-from-abcdef',
'--append felix-FROM-ENDPOINT --in-interface tap123456 --goto felix-from-123456',
'--append felix-FROM-ENDPOINT --in-interface tapb7d849 --goto felix-from-b7d849',
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --out-interface tapabcdef --goto felix-to-abcdef',
'--append felix-TO-ENDPOINT --out-interface tap123456 --goto felix-to-123456',
'--append felix-TO-ENDPOINT --out-interface tapb7d849 --goto felix-to-b7d849',
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set(['felix-from-abcdef', 'felix-from-123456', 'felix-from-b7d849'])
to_chain_names = set(['felix-to-abcdef', 'felix-to-123456', 'felix-to-b7d849'])
self.iptables_updater.assertCalledOnce()
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names
)
def test_applying_snapshot_dirty(self):
"""
Tests that a snapshot can be applied to an actor that used to have
state.
"""
d = self.getDispatchChain()
# Insert some chains I don't want to see.
d.apply_snapshot(['tapxyz', 'tap889900', 'tapundefined'], async=True)
self.step_actor(d)
ifaces = ['tapabcdef', 'tap123456', 'tapb7d849']
d.apply_snapshot(ifaces, async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --in-interface tapabcdef --goto felix-from-abcdef',
'--append felix-FROM-ENDPOINT --in-interface tap123456 --goto felix-from-123456',
'--append felix-FROM-ENDPOINT --in-interface tapb7d849 --goto felix-from-b7d849',
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --out-interface tapabcdef --goto felix-to-abcdef',
'--append felix-TO-ENDPOINT --out-interface tap123456 --goto felix-to-123456',
'--append felix-TO-ENDPOINT --out-interface tapb7d849 --goto felix-to-b7d849',
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set(['felix-from-abcdef', 'felix-from-123456', 'felix-from-b7d849'])
to_chain_names = set(['felix-to-abcdef', 'felix-to-123456', 'felix-to-b7d849'])
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 2)
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names
)
def test_applying_empty_snapshot(self):
"""
Tests that an empty snapshot can be applied to an actor that used to
have state.
"""
d = self.getDispatchChain()
# Insert some chains I don't want to see.
d.apply_snapshot(['tapxyz', 'tap889900', 'tapundefined'], async=True)
self.step_actor(d)
# Clear it out
d.apply_snapshot([], async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set()
to_chain_names = set()
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 2)
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names
)
def test_on_endpoint_added_simple(self):
"""
Tests that adding an endpoint, adds it to the state.
"""
d = self.getDispatchChain()
# Insert some basic chains.
d.apply_snapshot(['tapabcdef', 'tap123456'], async=True)
self.step_actor(d)
# Add one endpoint.
d.on_endpoint_added('tapb7d849', async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --in-interface tapabcdef --goto felix-from-abcdef',
'--append felix-FROM-ENDPOINT --in-interface tap123456 --goto felix-from-123456',
'--append felix-FROM-ENDPOINT --in-interface tapb7d849 --goto felix-from-b7d849',
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --out-interface tapabcdef --goto felix-to-abcdef',
'--append felix-TO-ENDPOINT --out-interface tap123456 --goto felix-to-123456',
'--append felix-TO-ENDPOINT --out-interface tapb7d849 --goto felix-to-b7d849',
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set(['felix-from-abcdef', 'felix-from-123456', 'felix-from-b7d849'])
to_chain_names = set(['felix-to-abcdef', 'felix-to-123456', 'felix-to-b7d849'])
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 2)
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names
)
def test_on_endpoint_added_idempotent(self):
"""
Tests that adding an endpoint that's already present does nothing.
"""
d = self.getDispatchChain()
# Insert some basic chains.
d.apply_snapshot(['tapabcdef', 'tap123456', 'tapb7d849'], async=True)
self.step_actor(d)
# Add an endpoint we already have.
d.on_endpoint_added('tapabcdef', async=True)
self.step_actor(d)
# Confirm that we only got called once.
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 1)
def test_on_endpoint_removed_basic(self):
"""
Tests that we can remove an endpoint.
"""
d = self.getDispatchChain()
# Insert some basic chains.
d.apply_snapshot(['tapabcdef', 'tap123456', 'tapb7d849'], async=True)
self.step_actor(d)
# Remove an endpoint.
d.on_endpoint_removed('tapabcdef', async=True)
self.step_actor(d)
from_updates = [
'--append felix-FROM-ENDPOINT --in-interface tap123456 --goto felix-from-123456',
'--append felix-FROM-ENDPOINT --in-interface tapb7d849 --goto felix-from-b7d849',
'--append felix-FROM-ENDPOINT --jump DROP',
]
to_updates = [
'--append felix-TO-ENDPOINT --out-interface tap123456 --goto felix-to-123456',
'--append felix-TO-ENDPOINT --out-interface tapb7d849 --goto felix-to-b7d849',
'--append felix-TO-ENDPOINT --jump DROP',
]
from_chain_names = set(['felix-from-123456', 'felix-from-b7d849'])
to_chain_names = set(['felix-to-123456', 'felix-to-b7d849'])
# Confirm that we got called twice.
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 2)
args = self.iptables_updater.rewrite_chains.call_args
self.assert_iptables_update(
args,
to_updates,
from_updates,
to_chain_names,
from_chain_names
)
def test_on_endpoint_removed_idempotent(self):
"""
Tests that removing an endpoint multiple times does nothing.
"""
d = self.getDispatchChain()
# Insert some basic chains.
d.apply_snapshot(['tapabcdef', 'tap123456', 'tapb7d849'], async=True)
self.step_actor(d)
# Remove an endpoint.
d.on_endpoint_removed('tapabcdef', async=True)
self.step_actor(d)
# Remove it a few more times for good measure.
d.on_endpoint_removed('tapabcdef', async=True)
self.step_actor(d)
d.on_endpoint_removed('tapabcdef', async=True)
self.step_actor(d)
d.on_endpoint_removed('tapabcdef', async=True)
self.step_actor(d)
# Confirm that we only got called twice.
self.assertEqual(self.iptables_updater.rewrite_chains.call_count, 2)
| 41.866834 | 95 | 0.601452 |
24cc9c104b85c2fa7bb5dd05fa85c2271b659a73
| 5,556 |
py
|
Python
|
vnpy/app/stock_screener/template.py
|
liuying3013/vnpy
|
e1cc1ea4af5fa6ec9a31e5b954c19cfaa0a3130e
|
[
"MIT"
] | 323 |
2015-11-21T14:45:29.000Z
|
2022-03-16T08:54:37.000Z
|
vnpy/app/stock_screener/template.py
|
liuying3013/vnpy
|
e1cc1ea4af5fa6ec9a31e5b954c19cfaa0a3130e
|
[
"MIT"
] | 9 |
2017-03-21T08:26:21.000Z
|
2021-08-23T06:41:17.000Z
|
vnpy/app/stock_screener/template.py
|
liuying3013/vnpy
|
e1cc1ea4af5fa6ec9a31e5b954c19cfaa0a3130e
|
[
"MIT"
] | 148 |
2016-09-26T03:25:39.000Z
|
2022-02-06T14:43:48.000Z
|
""""""
import os
import bz2
import pickle
import traceback
import zlib
from abc import ABC
from copy import copy, deepcopy
from typing import Any, Callable
from logging import INFO, ERROR
from datetime import datetime, timedelta
from vnpy.trader.constant import Interval, Direction, Offset, Status, OrderType, Color, Exchange
from vnpy.trader.object import BarData, TickData, OrderData, TradeData
from vnpy.trader.utility import virtual, append_data, extract_vt_symbol, get_underlying_symbol
from vnpy.component.cta_grid_trade import CtaGrid, CtaGridTrade, LOCK_GRID
class ScreenerTemplate(ABC):
    """Stock-screener strategy template.

    Subclasses override the on_init/on_start/on_timer hooks and fill in
    ``parameters``, ``variables`` and ``results``.  The template also provides
    K-line (bar) cache persistence helpers shared by all screener strategies.
    """

    author = ""
    parameters = []  # names of the screener's configurable parameters
    variables = []  # names of the screener's runtime variables
    results = []  # screening results

    def __init__(
        self,
        engine: Any,
        strategy_name: str,
        setting: dict,
    ):
        self.engine = engine
        self.strategy_name = strategy_name

        self.inited = False  # initialization finished
        self.running = False  # screening run in progress
        self.completed = False  # screening run finished

        self.klines = {}  # all K-line (bar) series, keyed by name

        self.update_setting(setting)

    def update_setting(self, setting: dict):
        """
        Update strategy parameters with values in the setting dict.
        Only names listed in ``parameters`` are applied.
        """
        for name in self.parameters:
            if name in setting:
                setattr(self, name, setting[name])

    def write_log(self, msg: str, level: int = INFO):
        """
        Write a log message via the engine.
        """
        self.engine.write_log(msg=msg, strategy_name=self.strategy_name, level=level)

    def write_error(self, msg: str):
        """Write an error-level log message."""
        self.write_log(msg=msg, level=ERROR)

    @virtual
    def on_timer(self):
        """Periodic timer callback (optional override)."""
        pass

    @virtual
    def on_init(self):
        """
        Callback when strategy is inited.
        """
        pass

    @virtual
    def on_start(self):
        """
        Callback when strategy is started.
        """
        pass

    def check_adjust(self, vt_symbol):
        """
        Check whether the stock's most recent ex-dividend/ex-rights adjustment
        happened within the past week.
        :param vt_symbol: symbol to check
        :return: True: no adjustment within the past week; False: an adjustment
            occurred within the past week
        """
        last_adjust_factor = self.engine.get_adjust_factor(vt_symbol)
        if last_adjust_factor is None:
            return True
        last_adjust_date = last_adjust_factor.get('dividOperateDate', None)
        # Last ex-dividend/ex-rights date falls within the past week
        # (string comparison works because dates are ISO-formatted).
        if last_adjust_date and (datetime.now() - timedelta(days=7)).strftime('%Y-%m-%d') <= last_adjust_date:
            self.write_log(
                '{}[{}]发生除权除息,日期:{}'.format(vt_symbol, last_adjust_factor.get('name'), last_adjust_date))
            return False

        return True

    def save_klines_to_cache(self, symbol, kline_names: list = None):
        """
        Save K-line (bar) data to this symbol's cache file.
        :param symbol: symbol whose series are saved
        :param kline_names: usually keys of self.klines; defaults to every
            series whose name starts with ``symbol``
        :return: None
        """
        # BUG FIX: the default used to be a mutable list literal; use None.
        if not kline_names:
            kline_names = [s for s in list(self.klines.keys()) if s.startswith(symbol)]

        # Resolve the cache directory, creating it if needed.
        save_path = os.path.abspath(os.path.join(self.engine.get_data_path(), 'klines'))
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        # One cache file per symbol (there may be very many stocks).
        file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_{symbol}_klines.pkb2'))
        with bz2.BZ2File(file_name, 'wb') as f:
            klines = {}
            for kline_name in kline_names:
                kline = self.klines.get(kline_name, None)
                # if kline:
                #     kline.strategy = None
                #     kline.cb_on_bar = None
                klines.update({kline_name: kline})
            pickle.dump(klines, f)

    def load_klines_from_cache(self, symbol, kline_names: list = None):
        """
        Restore K-line (bar) data from this symbol's cache file into the
        strategy's existing kline instances.
        :param kline_names: series names to restore; defaults to all of
            self.klines
        :return: the latest bar end time across restored series, or None on
            failure
        """
        # BUG FIX: the default used to be a mutable list literal; use None.
        if not kline_names:
            kline_names = list(self.klines.keys())

        save_path = os.path.abspath(os.path.join(self.engine.get_data_path(), 'klines'))
        # Cache file keyed by strategy name + symbol.
        file_name = os.path.abspath(os.path.join(save_path, f'{self.strategy_name}_{symbol}_klines.pkb2'))

        try:
            last_bar_dt = None
            with bz2.BZ2File(file_name, 'rb') as f:
                klines = pickle.load(f)
                # Restore each series in turn.
                for kline_name in kline_names:
                    # Cached kline instance
                    cache_kline = klines.get(kline_name, None)
                    # The strategy's current kline instance
                    strategy_kline = self.klines.get(kline_name, None)
                    if cache_kline and strategy_kline:
                        # Preserve the current on_bar callback before copying.
                        cb_on_bar = strategy_kline.cb_on_bar
                        # Cached instance state => current instance state
                        strategy_kline.__dict__.update(cache_kline.__dict__)

                        # Track the latest bar time across all series.
                        if last_bar_dt and strategy_kline.cur_datetime:
                            last_bar_dt = max(last_bar_dt, strategy_kline.cur_datetime)
                        else:
                            last_bar_dt = strategy_kline.cur_datetime

                        # Re-bind the strategy and on_bar callback.
                        strategy_kline.strategy = self
                        strategy_kline.cb_on_bar = cb_on_bar
                        self.write_log(f'恢复{kline_name}缓存数据,最新bar结束时间:{last_bar_dt}')

                self.write_log(u'加载缓存k线数据完毕')
                return last_bar_dt
        except Exception as ex:
            self.write_error(f'加载缓存K线数据失败:{str(ex)}')
            return None
| 32.87574 | 110 | 0.571994 |
c214896a9926cb14f1237c964580b79165b09f6a
| 135 |
py
|
Python
|
contrib/splitSpectrum/__init__.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,133 |
2022-01-07T21:24:57.000Z
|
2022-01-07T21:33:08.000Z
|
contrib/splitSpectrum/__init__.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 276 |
2019-02-10T07:18:28.000Z
|
2022-03-31T21:45:55.000Z
|
contrib/splitSpectrum/__init__.py
|
vincentschut/isce2
|
1557a05b7b6a3e65abcfc32f89c982ccc9b65e3c
|
[
"ECL-2.0",
"Apache-2.0"
] | 235 |
2019-02-10T05:00:53.000Z
|
2022-03-18T07:37:24.000Z
|
#!/usr/bin/env python
def SplitRangeSpectrum():
    """Factory returning a new ``PySplitRangeSpectrum`` instance.

    The import is performed lazily inside the function -- presumably to avoid
    loading the compiled extension at package-import time; confirm before
    moving it to module scope.
    """
    from .splitSpectrum import PySplitRangeSpectrum
    return PySplitRangeSpectrum()
| 22.5 | 51 | 0.777778 |
3dc4dc4bfce3f554435f9aa8d99611824a9a329a
| 8,013 |
py
|
Python
|
notebooks/md/data/md_equil/4AT3/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | null | null | null |
notebooks/md/data/md_equil/4AT3/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | 1 |
2021-07-30T15:01:53.000Z
|
2021-08-02T09:48:08.000Z
|
notebooks/md/data/md_equil/4AT3/md_equil.py
|
openkinome/study-ntrk-resistance
|
3c4ba64538ec55c774d34c9d7cd0a5e4665dc9d5
|
[
"MIT"
] | null | null | null |
import argparse
import os
import sys
from sys import stdout
import mdtraj as md
import numpy as np
import parmed
import simtk.openmm as mm
import simtk.openmm.app as app
import simtk.unit as unit
from openforcefield.topology import Molecule, Topology
from openmmforcefields.generators import SystemGenerator
from perses.utils.openeye import OEMol_to_omm_ff, createOEMolFromSDF
from simtk.openmm import MonteCarloBarostat, XmlSerializer
from simtk.openmm.app import CheckpointReporter, ForceField, PDBFile
from simtk.openmm.app.pdbreporter import PDBReporter
from simtk.openmm.app.statedatareporter import StateDataReporter
# Script: set up and run a 5 ns NPT equilibration of the 4AT3 kinase in
# complex with one docked TRK inhibitor, then serialize the equilibrated
# state/system for downstream use.
# Read arguments to get ligand
parser = argparse.ArgumentParser()
parser.add_argument(
    "-ligand",
    help="the docked ligand to be prepared for simulation",
    choices=["larotrectinib", "selitrectinib", "repotrectinib"],
    type=str,
)
args = parser.parse_args()
chosen_ligand = args.ligand
# Parameters
print("--> Reading parameters")
pressure = 1.0 * unit.bar
temperature = 300 * unit.kelvin
nonbonded_method = app.PME
constraints = app.HBonds
remove_cm_motion = True
collision_rate = 1.0 / unit.picoseconds
timestep = 0.002 * unit.picoseconds
solvent_padding = 10.0 * unit.angstrom
ionic_strength = 150 * unit.millimolar
# Forcefield
protein_forcefield = "amber14/protein.ff14SB.xml"
small_molecule_forcefield = "openff-1.1.0"
solvation_forcefield = "amber14/tip3p.xml"
forcefield = ForceField(protein_forcefield, solvation_forcefield)
# Set steps and frequencies
nsteps = 2500000 # 5 ns
report_freq = 100
chk_freq = 500
traj_freq = 1000 # 2500 frames
# Set the input file names
input_pdb = "4AT3_prepped.pdb"
input_ligands_sdf = "../../structures_from_docking/4AT3_hybrid_docking.sdf"
# Create output directory
output_prefix = "./output/" + chosen_ligand
os.makedirs(output_prefix, exist_ok=True)
print("--> Directory ", output_prefix, " created ")
# Set file names
integrator_xml_filename = "integrator_2fs.xml"
state_xml_filename = "equilibrated_state_5ns.xml"
state_pdb_filename = "equilibrated_state_5ns.pdb"
system_xml_filename = "equilibrated_system_5ns.xml"
checkpoint_filename = "equilibrated_checkpoint_5ns.chk"
traj_output_filename = "equilibrated_traj_5ns.xtc"
# Define the barostat for the system
barostat = mm.MonteCarloBarostat(pressure, temperature)
# Load and sort ligands
molecules = Molecule.from_file(input_ligands_sdf)
# NOTE(review): this assumes the SDF records appear in exactly this order --
# confirm the docking output ordering before relying on the name mapping.
ligand_names = ["larotrectinib", "selitrectinib", "repotrectinib"]
ligand_dict = dict(zip(ligand_names, molecules)) # Create dict for easy access later
# Make the SystemGenerator
system_generator = SystemGenerator(
    forcefields=[protein_forcefield, solvation_forcefield],
    barostat=barostat,
    periodic_forcefield_kwargs={"nonbondedMethod": app.PME},
    small_molecule_forcefield=small_molecule_forcefield,
    molecules=ligand_dict[chosen_ligand],
)
# Read in the PDB and create an OpenMM topology
pdbfile = app.PDBFile(input_pdb)
protein_topology, protein_positions = pdbfile.topology, pdbfile.positions
# Add ligand to topology - credit to @hannahbrucemacdonald for help here
print("--> Combining protein and ligand topologies")
off_ligand_topology = Topology.from_molecules(ligand_dict[chosen_ligand])
ligand_topology = off_ligand_topology.to_openmm()
ligand_positions = ligand_dict[chosen_ligand].conformers[0]
md_protein_topology = md.Topology.from_openmm(
    protein_topology
) # using mdtraj for protein top
md_ligand_topology = md.Topology.from_openmm(
    ligand_topology
) # using mdtraj for ligand top
md_complex_topology = md_protein_topology.join(md_ligand_topology) # add them together
complex_topology = md_complex_topology.to_openmm() # now back to openmm
# Concatenate protein and ligand coordinates into one position array.
total_atoms = len(protein_positions) + len(ligand_positions)
complex_positions = unit.Quantity(np.zeros([total_atoms, 3]), unit=unit.nanometers)
complex_positions[0 : len(protein_positions)] = protein_positions
for index, atom in enumerate(ligand_positions, len(protein_positions)):
    coords = atom / atom.unit
    # NOTE(review): dividing by 10 assumes the ligand conformer is in
    # angstroms -- confirm the unit carried by the conformer.
    complex_positions[index] = (
        coords / 10.0
    ) * unit.nanometers # since openmm works in nm
# Add hydrogens and solvate the system
modeller = app.Modeller(complex_topology, complex_positions)
print("Adding hydrogens to the system...")
modeller.addHydrogens(system_generator.forcefield)
print("Solvating the system...")
modeller.addSolvent(
    forcefield=system_generator.forcefield,
    model="tip3p",
    ionicStrength=ionic_strength,
    padding=solvent_padding,
)
# Create an OpenMM system
print("--> Creating an OpenMM system")
system = system_generator.create_system(modeller.topology)
# Make and serialize integrator - Langevin dynamics
print(
    "Serializing integrator to %s"
    % os.path.join(output_prefix, integrator_xml_filename)
)
integrator = mm.LangevinIntegrator(
    temperature, collision_rate, timestep # Friction coefficient
)
with open(os.path.join(output_prefix, integrator_xml_filename), "w") as outfile:
    xml = mm.XmlSerializer.serialize(integrator)
    outfile.write(xml)
# Define the platform to use; CUDA, OpenCL, CPU, or Reference. Or do not specify
# the platform to use the default (fastest) platform
# platform = mm.Platform.getPlatformByName("OpenCL")
# prop = dict(OpenCLPrecision="mixed") # Use mixed single/double precision
# Create the Simulation object
sim = app.Simulation(modeller.topology, system, integrator) # , platform, prop)
# Set the particle positions
sim.context.setPositions(modeller.positions)
# Minimize the energy
print("--> Minimising energy with docked ligand: " + chosen_ligand)
print(
    " initial : %8.3f kcal/mol"
    % (
        sim.context.getState(getEnergy=True).getPotentialEnergy()
        / unit.kilocalories_per_mole
    )
)
sim.minimizeEnergy()
print(
    " final : %8.3f kcal/mol"
    % (
        sim.context.getState(getEnergy=True).getPotentialEnergy()
        / unit.kilocalories_per_mole
    )
)
# set starting velocities:
print("--> Generating random starting velocities")
sim.context.setVelocitiesToTemperature(temperature * unit.kelvin)
# write limited state information to standard out:
sim.reporters.append(
    StateDataReporter(
        stdout,
        reportInterval=report_freq,
        step=True,
        time=True,
        potentialEnergy=True,
        kineticEnergy=True,
        temperature=True,
        speed=True,
        progress=True,
        remainingTime=True,
        totalSteps=nsteps,
        separator="\t",
    )
)
# Write to checkpoint files regularly:
sim.reporters.append(
    CheckpointReporter(
        file=os.path.join(output_prefix, checkpoint_filename), reportInterval=chk_freq
    )
)
# Write out the trajectory
sim.reporters.append(
    md.reporters.XTCReporter(
        file=os.path.join(output_prefix, traj_output_filename), reportInterval=traj_freq
    )
)
# Run NPT dynamics
print("--> Running dynamics in the NPT ensemble for the 4AT3:" + chosen_ligand + " complex")
sim.step(nsteps)
# Save and serialize the final state
print("--> Serializing state to %s" % os.path.join(output_prefix, state_xml_filename))
state = sim.context.getState(
    getPositions=True, getVelocities=True, getEnergy=True, getForces=True
)
with open(os.path.join(output_prefix, state_xml_filename), "w") as outfile:
    xml = mm.XmlSerializer.serialize(state)
    outfile.write(xml)
# Save the final state as a PDB
print("--> Saving final state as %s" % os.path.join(output_prefix, state_pdb_filename))
with open(os.path.join(output_prefix, state_pdb_filename), "w") as outfile:
    PDBFile.writeFile(
        sim.topology,
        sim.context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
        file=outfile,
        keepIds=True,
    )
# Save and serialize system
print("--> Serializing system to %s" % os.path.join(output_prefix, system_xml_filename))
# Box vectors may have changed under the barostat; copy them into the system.
system.setDefaultPeriodicBoxVectors(*state.getPeriodicBoxVectors())
with open(os.path.join(output_prefix, system_xml_filename), "w") as outfile:
    xml = mm.XmlSerializer.serialize(system)
    outfile.write(xml)
e9d550acc05f81a33fc09d68140916f102ccbc66
| 836 |
py
|
Python
|
angrmanagement/data/jobs/flirt_signature_recognition.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | null | null | null |
angrmanagement/data/jobs/flirt_signature_recognition.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | null | null | null |
angrmanagement/data/jobs/flirt_signature_recognition.py
|
DennyDai/angr-management
|
8a4ba5dafbf2f4d2ba558528a0d1ae099a199a04
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import TYPE_CHECKING
import logging
import angr.flirt
from .job import Job
if TYPE_CHECKING:
from angrmanagement.data.instance import Instance
_l = logging.getLogger(name=__name__)
class FlirtSignatureRecognitionJob(Job):
    """
    Job that applies FLIRT signatures to recognize and match library
    functions embedded in a binary, when signatures exist for the
    project's architecture.
    """

    def __init__(self, on_finish=None):
        super().__init__(name="Applying FLIRT signatures", on_finish=on_finish)

    def _run(self, inst: 'Instance'):
        # Guard clause: bail out with a warning when no signatures exist
        # for this architecture.
        arch_name = inst.project.arch.name
        if arch_name.lower() not in angr.flirt.FLIRT_SIGNATURES_BY_ARCH:
            _l.warning("No FLIRT signatures exist for architecture %s.", arch_name)
            return
        inst.project.analyses.Flirt()

    def __repr__(self):
        return "FlirtSignatureRecognitionJob"
| 27.866667 | 109 | 0.716507 |
669d6eb68d7566143116862af8ac16c248e8346b
| 1,075 |
py
|
Python
|
examples/docs_snippets/docs_snippets/overview/object_managers/custom_object_manager.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets/docs_snippets/overview/object_managers/custom_object_manager.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
examples/docs_snippets/docs_snippets/overview/object_managers/custom_object_manager.py
|
coderanger/dagster
|
d3e323f8ed55cd906d6f44f19595348ea1580b2d
|
[
"Apache-2.0"
] | null | null | null |
"""isort:skip_file"""
from dagster import solid
@solid
def solid1(_):
    """No-op solid used as the upstream producer in the example pipeline."""
    pass
@solid
def solid2(_, _a):
    """No-op solid that consumes the output of solid1 in the example pipeline."""
    pass
def write_dataframe_to_table(**_kwargs):
    """Stub standing in for "persist a dataframe to a named table" (no-op in this doc snippet)."""
    pass
def read_dataframe_from_table(**_kwargs):
    """Stub standing in for "load a dataframe from a named table" (no-op in this doc snippet)."""
    pass
# start_marker
from dagster import ObjectManager, ModeDefinition, object_manager, pipeline
class DataframeTableObjectManager(ObjectManager):
    """ObjectManager that persists solid outputs as named tables.

    Each output is written to a table named after its OutputDefinition and
    loaded back by looking up that same name on the upstream output.
    """

    def handle_output(self, context, obj):
        # context.name is the name given to the OutputDefinition being stored.
        write_dataframe_to_table(name=context.name, dataframe=obj)

    def load_input(self, context):
        # context.upstream_output.name is the OutputDefinition we're loading.
        return read_dataframe_from_table(name=context.upstream_output.name)
@object_manager
def df_table_object_manager(_):
    """Resource factory that provides the dataframe-table object manager."""
    return DataframeTableObjectManager()
@pipeline(mode_defs=[ModeDefinition(resource_defs={"object_manager": df_table_object_manager})])
def my_pipeline():
    """Example pipeline wiring solid1 -> solid2, with outputs handled by the table object manager."""
    solid2(solid1())
# end_marker
| 21.5 | 96 | 0.746047 |
dabd1e61620640776cf966b98fc0526a8f6a7be9
| 460 |
py
|
Python
|
pandora-ckz/pandora/jade/ext/pyramid/__init__.py
|
williamlagos/django-coding
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | null | null | null |
pandora-ckz/pandora/jade/ext/pyramid/__init__.py
|
williamlagos/django-coding
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | 21 |
2020-03-24T18:18:22.000Z
|
2021-03-31T20:18:53.000Z
|
pandora-ckz/pandora/jade/ext/pyramid/__init__.py
|
efforia/dashboard
|
246dc1aba32eae0b035c407de3e8fe954606b776
|
[
"MIT"
] | null | null | null |
from pyramid import mako_templating
from jade.ext.mako import preprocessor
def includeme(config):
    """Pyramid ``config.include`` hook: register the ``.jade`` renderer factory."""
    config.add_renderer(".jade", PyjadeRenderer)
class PyjadeRenderer(object):
    """Pyramid renderer for ``.jade`` templates.

    Installs the jade->mako preprocessor into the Mako settings, then
    delegates the actual rendering to Pyramid's stock Mako renderer.
    """

    def __init__(self, info):
        # Every template is run through the jade preprocessor before Mako sees it.
        info.settings['mako.preprocessor'] = preprocessor
        self.makoRenderer = mako_templating.renderer_factory(info)

    def __call__(self, value, system):
        render = self.makoRenderer
        return render(value, system)
| 27.058824 | 66 | 0.713043 |
49d3f848b6ae8d2453fdd85689d38362edbc9eee
| 1,844 |
py
|
Python
|
qiskit/qasm/_node/_customunitary.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | 1 |
2017-07-12T02:04:53.000Z
|
2017-07-12T02:04:53.000Z
|
qiskit/qasm/_node/_customunitary.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | null | null | null |
qiskit/qasm/_node/_customunitary.py
|
Phonemetra/TurboQuantum
|
c168d6dda361258ca1ffce60e7e8ac5d10e69f06
|
[
"Apache-2.0"
] | 6 |
2018-05-27T10:52:02.000Z
|
2021-04-02T19:20:11.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate statement.
Author: Jim Challenger
"""
from ._node import Node
class CustomUnitary(Node):
    """Node for an OPENQASM custom gate statement.

    children[0] is an id node.
    children[1] is an exp_list (if len==3) or primary_list.
    children[2], if present, is a primary_list.

    Has properties:
    .id = id node
    .name = gate name string
    .arguments = None or exp_list node
    .bitlist = primary_list node
    """

    def __init__(self, children):
        """Create the custom gate node."""
        Node.__init__(self, 'custom_unitary', children, None)
        self.id = children[0]
        self.name = self.id.name
        # With three children the middle one is the explicit argument list;
        # otherwise the gate takes no arguments and children[1] is the bit list.
        has_arguments = len(children) == 3
        self.arguments = children[1] if has_arguments else None
        self.bitlist = children[2] if has_arguments else children[1]

    def qasm(self):
        """Return the corresponding OPENQASM string."""
        parts = [self.name]
        if self.arguments is not None:
            parts.append("(" + self.arguments.qasm() + ")")
        parts.append(" " + self.bitlist.qasm() + ";")
        return "".join(parts)
| 31.254237 | 79 | 0.618221 |
9b6fb5cde0163b59aea599ceadb9398109e9e94a
| 4,111 |
py
|
Python
|
azure/mgmt/network/v2017_09_01/models/metric_specification.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 |
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
azure/mgmt/network/v2017_09_01/models/metric_specification.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
azure/mgmt/network/v2017_09_01/models/metric_specification.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MetricSpecification(Model):
    """Description of metrics specification.

    :param name: The name of the metric.
    :type name: str
    :param display_name: The display name of the metric.
    :type display_name: str
    :param display_description: The description of the metric.
    :type display_description: str
    :param unit: Units the metric to be displayed in.
    :type unit: str
    :param aggregation_type: The aggregation type.
    :type aggregation_type: str
    :param availabilities: List of availability.
    :type availabilities:
     list[~azure.mgmt.network.v2017_09_01.models.Availability]
    :param enable_regional_mdm_account: Whether regional MDM account enabled.
    :type enable_regional_mdm_account: bool
    :param fill_gap_with_zero: Whether gaps would be filled with zeros.
    :type fill_gap_with_zero: bool
    :param metric_filter_pattern: Pattern for the filter of the metric.
    :type metric_filter_pattern: str
    :param dimensions: List of dimensions.
    :type dimensions: list[~azure.mgmt.network.v2017_09_01.models.Dimension]
    :param is_internal: Whether the metric is internal.
    :type is_internal: bool
    :param source_mdm_account: The source MDM account.
    :type source_mdm_account: str
    :param source_mdm_namespace: The source MDM namespace.
    :type source_mdm_namespace: str
    :param resource_id_dimension_name_override: The resource Id dimension name
     override.
    :type resource_id_dimension_name_override: str
    """

    # Maps Python attribute names to their wire (JSON) keys and msrest types;
    # must stay in sync with the service's API contract.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'display_description': {'key': 'displayDescription', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
        'availabilities': {'key': 'availabilities', 'type': '[Availability]'},
        'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
        'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
        'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
        'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
        'is_internal': {'key': 'isInternal', 'type': 'bool'},
        'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
        'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
        'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
    }

    def __init__(self, name=None, display_name=None, display_description=None, unit=None, aggregation_type=None, availabilities=None, enable_regional_mdm_account=None, fill_gap_with_zero=None, metric_filter_pattern=None, dimensions=None, is_internal=None, source_mdm_account=None, source_mdm_namespace=None, resource_id_dimension_name_override=None):
        # Let the msrest Model base class run its own initialization first;
        # the original generated code skipped this call, which msrest-based
        # models conventionally make before assigning fields.
        super(MetricSpecification, self).__init__()
        self.name = name
        self.display_name = display_name
        self.display_description = display_description
        self.unit = unit
        self.aggregation_type = aggregation_type
        self.availabilities = availabilities
        self.enable_regional_mdm_account = enable_regional_mdm_account
        self.fill_gap_with_zero = fill_gap_with_zero
        self.metric_filter_pattern = metric_filter_pattern
        self.dimensions = dimensions
        self.is_internal = is_internal
        self.source_mdm_account = source_mdm_account
        self.source_mdm_namespace = source_mdm_namespace
        self.resource_id_dimension_name_override = resource_id_dimension_name_override
| 50.134146 | 350 | 0.68791 |
dcaaded9f5453655c24bbb85e0115b8bb2fb7008
| 6,700 |
py
|
Python
|
modules/image/semantic_segmentation/bisenetv2_cityscapes/layers.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 8,360 |
2019-01-18T10:46:45.000Z
|
2022-03-31T14:50:02.000Z
|
modules/image/semantic_segmentation/bisenetv2_cityscapes/layers.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 1,158 |
2019-04-11T09:22:43.000Z
|
2022-03-31T12:12:09.000Z
|
modules/image/semantic_segmentation/bisenetv2_cityscapes/layers.py
|
AK391/PaddleHub
|
a51ab7447e089776766becb3297e560dfed98573
|
[
"Apache-2.0"
] | 1,677 |
2019-04-09T15:07:40.000Z
|
2022-03-31T06:41:10.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
def SyncBatchNorm(*args, **kwargs):
    """In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead"""
    # Fall back to the plain 2-D batch norm on CPU or during export.
    use_plain_bn = paddle.get_device() == 'cpu' or os.environ.get('PADDLESEG_EXPORT_STAGE')
    layer_cls = nn.BatchNorm2D if use_plain_bn else nn.SyncBatchNorm
    return layer_cls(*args, **kwargs)
class ConvBNReLU(nn.Layer):
    """Conv2D -> (Sync)BatchNorm -> ReLU applied in sequence."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, padding: str = 'same', **kwargs):
        super().__init__()
        # Attribute names are kept so existing state dicts still load.
        self._conv = nn.Conv2D(in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        self._batch_norm = SyncBatchNorm(out_channels)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return F.relu(self._batch_norm(self._conv(x)))
class ConvBN(nn.Layer):
    """Conv2D followed by (Sync)BatchNorm, with no activation."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, padding: str = 'same', **kwargs):
        super().__init__()
        # Attribute names are kept so existing state dicts still load.
        self._conv = nn.Conv2D(in_channels, out_channels, kernel_size, padding=padding, **kwargs)
        self._batch_norm = SyncBatchNorm(out_channels)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self._batch_norm(self._conv(x))
class ConvReLUPool(nn.Layer):
    """Basic conv relu pool layer: 3x3 Conv2D -> ReLU -> 2x2 max pooling (stride 2)."""

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        self.conv = nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1, dilation=1)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        x = self.conv(x)
        x = F.relu(x)
        # `paddle.nn.functional` has no `pool2d` (that name belonged to the old
        # `paddle.fluid.layers` API), so the original call failed at runtime.
        # `F.max_pool2d` reproduces the intended 2x2/stride-2 max pooling.
        x = F.max_pool2d(x, kernel_size=2, stride=2)
        return x
class SeparableConvBNReLU(nn.Layer):
    """Depthwise-separable block: depthwise ConvBN, then 1x1 pointwise ConvBNReLU."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, padding: str = 'same', **kwargs):
        super().__init__()
        self.depthwise_conv = ConvBN(
            in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            padding=padding,
            groups=in_channels,
            **kwargs)
        # NOTE: the historical "piontwise" (sic) attribute name is preserved
        # on purpose so that previously saved state dicts keep loading.
        self.piontwise_conv = ConvBNReLU(in_channels, out_channels, kernel_size=1, groups=1)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self.piontwise_conv(self.depthwise_conv(x))
class DepthwiseConvBN(nn.Layer):
    """Depthwise convolution (groups == in_channels) followed by batch norm."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int, padding: str = 'same', **kwargs):
        super().__init__()
        self.depthwise_conv = ConvBN(
            in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            groups=in_channels,
            **kwargs)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self.depthwise_conv(x)
class AuxLayer(nn.Layer):
    """
    The auxiliary layer implementation for auxiliary loss.

    Args:
        in_channels (int): The number of input channels.
        inter_channels (int): The intermediate channels.
        out_channels (int): The number of output channels, and usually it is num_classes.
        dropout_prob (float, optional): The drop rate. Default: 0.1.
    """

    def __init__(self, in_channels: int, inter_channels: int, out_channels: int, dropout_prob: float = 0.1):
        super().__init__()
        self.conv_bn_relu = ConvBNReLU(in_channels=in_channels, out_channels=inter_channels, kernel_size=3, padding=1)
        self.dropout = nn.Dropout(p=dropout_prob)
        # Final 1x1 projection to the requested number of channels (logits).
        self.conv = nn.Conv2D(in_channels=inter_channels, out_channels=out_channels, kernel_size=1)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self.conv(self.dropout(self.conv_bn_relu(x)))
class Activation(nn.Layer):
    """
    The wrapper of activations.

    Args:
        act (str, optional): The activation name in lowercase. It must be one of ['elu', 'gelu',
            'hardshrink', 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid',
            'softmax', 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax',
            'hsigmoid']. Default: None, means identical transformation.

    Returns:
        A callable object of Activation.

    Raises:
        KeyError: When parameter `act` is not in the optional range.

    Examples:
        from paddleseg.models.common.activation import Activation

        relu = Activation("relu")
        print(relu)
        # <class 'paddle.nn.layer.activation.ReLU'>

        sigmoid = Activation("sigmoid")
        print(sigmoid)
        # <class 'paddle.nn.layer.activation.Sigmoid'>

        not_exit_one = Activation("not_exit_one")
        # KeyError: "not_exit_one does not exist in the current dict_keys(['elu', 'gelu', 'hardshrink',
        # 'tanh', 'hardtanh', 'prelu', 'relu', 'relu6', 'selu', 'leakyrelu', 'sigmoid', 'softmax',
        # 'softplus', 'softshrink', 'softsign', 'tanhshrink', 'logsigmoid', 'logsoftmax', 'hsigmoid'])"
    """

    def __init__(self, act: str = None):
        super(Activation, self).__init__()

        self._act = act
        # Build a lowercase -> CamelCase map of every class exported by
        # paddle.nn.layer.activation (e.g. 'relu' -> 'ReLU').
        upper_act_names = nn.layer.activation.__dict__.keys()
        lower_act_names = [name.lower() for name in upper_act_names]
        act_dict = dict(zip(lower_act_names, upper_act_names))

        if act is not None:
            if act in act_dict.keys():
                act_name = act_dict[act]
                # getattr replaces the previous eval()-based lookup: identical
                # behavior without executing dynamically-built source code.
                self.act_func = getattr(nn.layer.activation, act_name)()
            else:
                raise KeyError("{} does not exist in the current {}".format(act, act_dict.keys()))

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        if self._act is not None:
            return self.act_func(x)
        else:
            return x
| 35.828877 | 118 | 0.639851 |
f462ad4be9d46fae7e70812f6e14b3642ef990a2
| 5,304 |
py
|
Python
|
homeassistant/auth/providers/command_line.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 5 |
2019-02-24T11:46:18.000Z
|
2019-05-28T17:37:21.000Z
|
homeassistant/auth/providers/command_line.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 77 |
2020-07-16T16:43:09.000Z
|
2022-03-31T06:14:37.000Z
|
homeassistant/auth/providers/command_line.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | 6 |
2017-09-01T03:58:05.000Z
|
2021-12-20T10:30:58.000Z
|
"""Auth provider that validates credentials via an external command."""
from __future__ import annotations
import asyncio
import collections
from collections.abc import Mapping
import logging
import os
from typing import Any, cast
import voluptuous as vol
from homeassistant.const import CONF_COMMAND
from homeassistant.data_entry_flow import FlowResult
from homeassistant.exceptions import HomeAssistantError
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
# Configuration keys specific to this auth provider.
CONF_ARGS = "args"
CONF_META = "meta"
# Provider config schema: a required command path (normalized via
# os.path.normpath; the validation message asks for an absolute path),
# an optional list of string arguments, and an optional flag that enables
# reading user metadata from the command's stdout.
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
    {
        vol.Required(CONF_COMMAND): vol.All(
            str, os.path.normpath, msg="must be an absolute path"
        ),
        vol.Optional(CONF_ARGS, default=None): vol.Any(vol.DefaultTo(list), [str]),
        vol.Optional(CONF_META, default=False): bool,
    },
    extra=vol.PREVENT_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
# Raised by CommandLineAuthProvider.async_validate_login when the external
# command cannot be executed or exits with a non-zero status.
class InvalidAuthError(HomeAssistantError):
    """Raised when authentication with given credentials fails."""
@AUTH_PROVIDERS.register("command_line")
class CommandLineAuthProvider(AuthProvider):
    """Auth provider validating credentials by calling a command."""
    DEFAULT_TITLE = "Command Line Authentication"
    # which keys to accept from a program's stdout
    ALLOWED_META_KEYS = ("name",)
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Extend parent's __init__.
        Adds self._user_meta dictionary to hold the user-specific
        attributes provided by external programs.
        """
        super().__init__(*args, **kwargs)
        # username -> metadata dict parsed from the command's stdout.
        self._user_meta: dict[str, dict[str, Any]] = {}
    async def async_login_flow(self, context: dict | None) -> LoginFlow:
        """Return a flow to login."""
        return CommandLineLoginFlow(self)
    async def async_validate_login(self, username: str, password: str) -> None:
        """Validate a username and password.

        Runs the configured command with the credentials passed via
        environment variables; raises InvalidAuthError on any failure.
        """
        env = {"username": username, "password": password}
        try:
            # stdout is only captured when meta parsing is enabled.
            process = await asyncio.create_subprocess_exec(
                self.config[CONF_COMMAND],
                *self.config[CONF_ARGS],
                env=env,
                stdout=asyncio.subprocess.PIPE if self.config[CONF_META] else None,
            )
            stdout, _ = await process.communicate()
        except OSError as err:
            # happens when command doesn't exist or permission is denied
            _LOGGER.error("Error while authenticating %r: %s", username, err)
            raise InvalidAuthError from err
        if process.returncode != 0:
            # Non-zero exit status means the command rejected the credentials.
            _LOGGER.error(
                "User %r failed to authenticate, command exited with code %d",
                username,
                process.returncode,
            )
            raise InvalidAuthError
        if self.config[CONF_META]:
            # Parse "key = value" lines from stdout; '#' lines are comments
            # and malformed lines are skipped silently.
            meta: dict[str, str] = {}
            for _line in stdout.splitlines():
                try:
                    line = _line.decode().lstrip()
                    if line.startswith("#"):
                        continue
                    key, value = line.split("=", 1)
                except ValueError:
                    # malformed line
                    continue
                key = key.strip()
                value = value.strip()
                if key in self.ALLOWED_META_KEYS:
                    meta[key] = value
            self._user_meta[username] = meta
    async def async_get_or_create_credentials(
        self, flow_result: Mapping[str, str]
    ) -> Credentials:
        """Get credentials based on the flow result."""
        username = flow_result["username"]
        # Reuse an existing credential for this username if one exists.
        for credential in await self.async_credentials():
            if credential.data["username"] == username:
                return credential
        # Create new credentials.
        return self.async_create_credentials({"username": username})
    async def async_user_meta_for_credentials(
        self, credentials: Credentials
    ) -> UserMeta:
        """Return extra user metadata for credentials.
        Currently, only name is supported.
        """
        # Meta is only populated if CONF_META was enabled during login.
        meta = self._user_meta.get(credentials.data["username"], {})
        return UserMeta(name=meta.get("name"), is_active=True)
class CommandLineLoginFlow(LoginFlow):
    """Handler for the login flow."""

    async def async_step_init(
        self, user_input: dict[str, str] | None = None
    ) -> FlowResult:
        """Render the login form and validate submitted credentials."""
        errors: dict[str, str] = {}

        if user_input is not None:
            user_input["username"] = user_input["username"].strip()
            provider = cast(CommandLineAuthProvider, self._auth_provider)
            try:
                await provider.async_validate_login(
                    user_input["username"], user_input["password"]
                )
            except InvalidAuthError:
                errors["base"] = "invalid_auth"
            if not errors:
                # Do not keep the plaintext password past validation.
                user_input.pop("password")
                return await self.async_finish(user_input)

        fields = collections.OrderedDict((("username", str), ("password", str)))
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(fields), errors=errors
        )
| 34 | 86 | 0.614065 |
8e9c5b08d85b1be52376a38ab0642141f5298672
| 1,792 |
py
|
Python
|
network_selector/write_job_description.py
|
earthobservatory/ariamh-pub
|
f33731e127f38ff33b02e02c07b16793c07651a6
|
[
"Apache-2.0"
] | 4 |
2019-11-19T03:35:35.000Z
|
2020-12-07T18:43:11.000Z
|
network_selector/write_job_description.py
|
earthobservatory/ariamh-pub
|
f33731e127f38ff33b02e02c07b16793c07651a6
|
[
"Apache-2.0"
] | 3 |
2019-06-05T03:35:55.000Z
|
2020-04-09T14:16:08.000Z
|
network_selector/write_job_description.py
|
earthobservatory/ariamh-pub
|
f33731e127f38ff33b02e02c07b16793c07651a6
|
[
"Apache-2.0"
] | 6 |
2019-08-23T22:53:11.000Z
|
2021-11-06T15:15:30.000Z
|
#!/usr/bin/env python
import os, sys, json, argparse
from jobDescriptorWriter import write_job_descriptor
def write_job_desc(json_file, context_file):
    """Write job descriptor JSON file settings from context.

    Merges selected fields from the context JSON into the (possibly
    pre-existing) job descriptor at `json_file`.

    Args:
        json_file: path of the descriptor to create or update.
        context_file: path of the context JSON to read settings from.

    Raises:
        RuntimeError: if `context_file` does not exist.
    """
    # Start from the existing descriptor so unrelated settings are preserved.
    if os.path.exists(json_file):
        with open(json_file) as f:
            j = json.load(f)
    else:
        j = {}

    # The context file is required input; fail loudly (with a message — the
    # original raised a bare RuntimeError) if it is missing.
    if not os.path.exists(context_file):
        raise RuntimeError("context file not found: %s" % context_file)
    with open(context_file) as f:
        c = json.load(f)

    # Fields copied (or transformed) from context into the descriptor.
    fields = ['project', 'mode', 'workflow', 'unwrapper', 'unwrap',
              'posting', 'filterStrength', 'output_name', 'geolist',
              'productList']

    for field in fields:
        if field not in c:
            continue
        if field == 'output_name':
            # Routed under the networkSelector section with a different key.
            j.setdefault('networkSelector', {})['outputFile'] = c[field]
        elif field == 'geolist':
            # Whitespace-separated string -> list.
            j.setdefault('createInterferogram', {})['geolist'] = c[field].split()
        elif field == 'productList':
            # Drop the "*.geo" glob; only write the key when non-empty.
            product_list = [i for i in c[field].split() if i != "*.geo"]
            if product_list:
                j.setdefault('createInterferogram', {})['productList'] = product_list
        else:
            j[field] = c[field]

    # Write out the merged descriptor.
    with open(json_file, 'w') as f:
        json.dump(j, f, indent=2, sort_keys=True)
if __name__ == "__main__":
    # CLI entry point: --file is the job description JSON to create/update,
    # --context is the context.json produced by an upstream step.
    parser = argparse.ArgumentParser(description="Write job description JSON file from context.json.")
    parser.add_argument('--file', required=True, help="job description JSON file")
    parser.add_argument('--context', required=True, help="context JSON file")
    args = parser.parse_args()
    write_job_desc(args.file, args.context)
| 34.461538 | 102 | 0.606585 |
e4311c2231765d9751500bec5b95f63da6788f49
| 2,852 |
py
|
Python
|
ar_app/main.py
|
osetr/ar-opencv-python
|
ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3
|
[
"MIT"
] | 1 |
2020-11-22T13:55:11.000Z
|
2020-11-22T13:55:11.000Z
|
ar_app/main.py
|
osetr/ar-opencv-python
|
ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3
|
[
"MIT"
] | null | null | null |
ar_app/main.py
|
osetr/ar-opencv-python
|
ae62ebeed176ef2e6d8d68fbaaa7d402dadc3eb3
|
[
"MIT"
] | null | null | null |
from classifier import Classifier
import cv2
import numpy as np
from datetime import datetime
# available descriptor for Classifier
from descriptors import ORB_Descriptor, SIFT_Descriptor
# available classification models for Classifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import AdaBoostClassifier
# Train the presence classifier from two labeled image folders.
model = Classifier(descriptor=SIFT_Descriptor())
model.fit("with_object", "without_object")
# model.process_video("video_to_check.MOV", (640, 480), fps=30)
# Webcam capture, downsized to keep per-frame feature matching fast.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
# Reference (cover) image whose keypoints anchor the homography.
cover_img = cv2.imread("img.jpg")
# NOTE(review): myVid is opened but never read in this script — confirm intent.
myVid = cv2.VideoCapture("video.mp4")
# Overlay image, resized to exactly match the cover image dimensions.
video_frame = cv2.imread("img.jpg")
height, width, _ = cover_img.shape
video_frame = cv2.resize(video_frame, (width, height))
# Precompute keypoints/descriptors of the reference image once.
method = model.descriptor
method.compute(cover_img)
kp1, des1 = (method.points, method.descriptors)
bf = cv2.BFMatcher()
# fourcc = cv2.VideoWriter_fourcc(*"FMP4")
# out = cv2.VideoWriter(
#     "results/augmenting_reality.avi",
#     fourcc,
#     20,
#     (640, 480)
# )
# Simple once-per-second FPS counter state.
start_time_point = datetime.now()
frame_counter = 0
# Main capture/augment/display loop; press 'q' to quit.
while True:
    # Report the achieved frame rate roughly once per second.
    if (datetime.now() - start_time_point).seconds > 1:
        start_time_point = datetime.now()
        print(frame_counter)
        frame_counter = 0
    else:
        frame_counter += 1

    sucess, imgWebcam = cap.read()
    if not sucess:
        # Camera frame unavailable: skip instead of crashing on a None image
        # (the original ignored the success flag and called .copy() anyway).
        continue
    imgAug = imgWebcam.copy()
    # Extract keypoints/descriptors of the live frame.
    method.compute(imgAug)
    kp2, des2 = (method.points, method.descriptors)
    try:
        # Lowe's ratio test on 2-NN matches against the reference image.
        matches = bf.knnMatch(des1, des2, k=2)
        good = []
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                good.append(m)
        if model.predict(imgWebcam):
            # Homography from reference keypoints to live-frame keypoints.
            srcPts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dstPts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            matrix, mask = cv2.findHomography(srcPts, dstPts, cv2.RANSAC, 5)
            pts = np.float32(
                [[0, 0], [0, height], [width, height], [width, 0]]
            ).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, matrix)
            # Warp the overlay into the detected quadrilateral and composite.
            imgWarp = cv2.warpPerspective(
                video_frame, matrix, (imgWebcam.shape[1], imgWebcam.shape[0])
            )
            maskNew = np.zeros((imgWebcam.shape[0], imgWebcam.shape[1]), np.uint8)
            cv2.fillPoly(maskNew, [np.int32(dst)], (255, 255, 255))
            maskInv = cv2.bitwise_not(maskNew)
            imgAug = cv2.bitwise_and(imgAug, imgAug, mask=maskInv)
            imgAug = cv2.bitwise_or(imgWarp, imgAug)
    except Exception:
        # Best-effort: frames with too few matches or a degenerate homography
        # are simply shown un-augmented. A bare `except:` (as before) would
        # also swallow KeyboardInterrupt/SystemExit, so catch Exception only.
        pass

    # out.write(imgAug)
    cv2.imshow("AugmentedReality", imgAug)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
| 30.666667 | 85 | 0.649719 |
21e6dd834ffffe16d0045784007d13c82dc4ca20
| 530 |
py
|
Python
|
tests/test.py
|
liuzhengzheng12/hash_LB
|
1ec7a2b8169cd890c9d19df10f256907d748be97
|
[
"Apache-2.0"
] | null | null | null |
tests/test.py
|
liuzhengzheng12/hash_LB
|
1ec7a2b8169cd890c9d19df10f256907d748be97
|
[
"Apache-2.0"
] | null | null | null |
tests/test.py
|
liuzhengzheng12/hash_LB
|
1ec7a2b8169cd890c9d19df10f256907d748be97
|
[
"Apache-2.0"
] | null | null | null |
import pd_base_tests
from ptf import config
from res_pd_rpc.ttypes import *
from ptf.testutils import *
from ptf.thriftutils import *
from heavy_hitter.p4_pd_rpc.ttypes import *
# Name of the P4 program instance this dataplane test targets.
INSTANCE_NAME = 'hash_LB'
class Test(pd_base_tests.ThriftInterfaceDataPlane):
    # PTF/Thrift dataplane test skeleton for the hash_LB program;
    # setUp/runTest are placeholders to be filled in.
    def __init__(self):
        pd_base_tests.ThriftInterfaceDataPlane.__init__(self,
                                                        [INSTANCE_NAME])
        self.instance_name = INSTANCE_NAME
    def setUp(self):
        # No per-test setup yet.
        pass
    def runTest(self):
        # No test logic yet.
        pass
| 23.043478 | 72 | 0.667925 |
13b8c33184f5322aacc7f790cbc65e7947f7be22
| 134 |
py
|
Python
|
32_tuple_updating.py
|
onowdev/python-selflearning
|
3d7245de0207a5bfcbce4f7adde60e7316b70a8e
|
[
"MIT"
] | null | null | null |
32_tuple_updating.py
|
onowdev/python-selflearning
|
3d7245de0207a5bfcbce4f7adde60e7316b70a8e
|
[
"MIT"
] | null | null | null |
32_tuple_updating.py
|
onowdev/python-selflearning
|
3d7245de0207a5bfcbce4f7adde60e7316b70a8e
|
[
"MIT"
] | null | null | null |
tup1 = (12, 34.56)
tup2 = ('abc', 'xyz')
# Following action is not valid for tuples
# tup1[0] = 100;
# Tuples are immutable, so "updating" means creating a new tuple,
# e.g. by concatenating existing ones:
tup3 = tup1 + tup2
print(tup3)  # (12, 34.56, 'abc', 'xyz')
| 14.888889 | 42 | 0.619403 |
466921f80e527c56f3ce6c4e165b86d11708622c
| 4,243 |
py
|
Python
|
docs/samples/v1beta1/transformer/torchserve_image_transformer/image_transformer/image_transformer.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 6 |
2022-02-15T21:54:19.000Z
|
2022-02-16T21:18:54.000Z
|
docs/samples/v1beta1/transformer/torchserve_image_transformer/image_transformer/image_transformer.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 7 |
2021-08-31T23:55:06.000Z
|
2022-03-02T11:34:58.000Z
|
docs/samples/v1beta1/transformer/torchserve_image_transformer/image_transformer/image_transformer.py
|
titoeb/kfserving
|
b072a76842b57e904dbdf46a136474a22051500d
|
[
"Apache-2.0"
] | 2 |
2021-12-16T10:32:07.000Z
|
2022-02-28T17:08:52.000Z
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import base64
import json
import tornado
from typing import List, Dict
from PIL import Image
import torchvision.transforms as transforms
import logging
import kfserving
logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)
# URL template for forwarding explain requests: host, then model name.
EXPLAINER_URL_FORMAT = "http://{0}/v1/models/{1}:explain"
# Per-image preprocessing: to tensor, then normalize
# (mean/std values are the conventional MNIST statistics).
image_processing = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
def image_transform(instance):
    """Decode one request instance's base64 image into a normalized tensor.

    Args:
        instance (dict): A single prediction instance whose ``data`` key
            holds a base64-encoded image.

    Returns:
        dict: The same instance, with ``data`` replaced in place by the
        preprocessed image tensor converted to a nested list.
    """
    raw_bytes = base64.b64decode(instance["data"])
    pil_image = Image.open(io.BytesIO(raw_bytes))
    instance["data"] = image_processing(pil_image).tolist()
    logging.info(instance)
    return instance
class ImageTransformer(kfserving.KFModel):
    """ A class object for the data handling activities of Image Classification
    Task and returns a KFServing compatible response.
    Args:
        kfserving (class object): The KFModel class from the KFServing
        module is passed here.
    """
    def __init__(self, name: str, predictor_host: str):
        """Initialize the model name, predictor host and the explainer host
        Args:
            name (str): Name of the model.
            predictor_host (str): The host in which the predictor runs.
        """
        super().__init__(name)
        self.predictor_host = predictor_host
        # The explainer is assumed to live on the same host as the predictor.
        self.explainer_host = predictor_host
        logging.info("MODEL NAME %s", name)
        logging.info("PREDICTOR URL %s", self.predictor_host)
        logging.info("EXPLAINER URL %s", self.explainer_host)
        # Request timeout passed to the tornado HTTP fetch in explain().
        self.timeout = 100
    def preprocess(self, inputs: Dict) -> Dict:
        """Pre-process activity of the Image Input data.
        Args:
            inputs (Dict): KFServing http request
        Returns:
            Dict: Returns the request input after converting it into a tensor
        """
        # Decodes each instance's base64 image via image_transform().
        return {'instances': [image_transform(instance) for instance in inputs['instances']]}
    def postprocess(self, inputs: List) -> List:
        """Post process function of Torchserve on the KFServing side is
        written here.
        Args:
            inputs (List): The list of the inputs
        Returns:
            List: If a post process functionality is specified, it converts that into
            a list.
        """
        # Identity pass-through: predictor output is returned unchanged.
        return inputs
    async def explain(self, request: Dict) -> Dict:
        """Returns the captum explanations for the input request
        Args:
            request (Dict): http input request
        Raises:
            NotImplementedError: If the explainer host is not specified.
            tornado.web.HTTPError: if the response code is not 200.
        Returns:
            Dict: Returns a dictionary response of the captum explain
        """
        if self.explainer_host is None:
            raise NotImplementedError
        logging.info("Inside Image Transformer explain %s", EXPLAINER_URL_FORMAT.format(self.explainer_host, self.name))
        # Forward the request body verbatim to the explainer endpoint.
        response = await self._http_client.fetch(
            EXPLAINER_URL_FORMAT.format(self.explainer_host, self.name),
            method='POST',
            request_timeout=self.timeout,
            body=json.dumps(request)
        )
        if response.code != 200:
            raise tornado.web.HTTPError(
                status_code=response.code,
                reason=response.body)
        return json.loads(response.body)
| 33.148438 | 120 | 0.662267 |
1fb32ccd8ade0d8234a40ab72f239ce1a199094b
| 269 |
py
|
Python
|
apps/calculator/urls.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | null | null | null |
apps/calculator/urls.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | null | null | null |
apps/calculator/urls.py
|
martync/zython
|
e008bbb33e212f0856e85b8594003402e0a635c0
|
[
"Beerware"
] | null | null | null |
from django.urls import re_path
from calculator import views
# URL routes for the calculator app; the names are used by reverse()/{% url %}.
urlpatterns = [
    # Calculator landing page.
    re_path(r'^$',
        views.CalculatorHomeView.as_view(),
        name='calculator_home'),
    # Alcohol-by-volume (ABV) calculator from raw measurement values.
    re_path(r'^raw/abv/$',
        views.ABVView.as_view(),
        name='calculator_abv'),
]
| 20.692308 | 43 | 0.635688 |
86b7ca24c5fd14b1a354aa2e18151149fb240c5d
| 6,490 |
py
|
Python
|
rgd/geodata/urls.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
rgd/geodata/urls.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
rgd/geodata/urls.py
|
venkatabhishek/ResonantGeoData
|
4e946e25c194874c22f4ba2ab49d6f0cf803e673
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path, register_converter
from . import api, views
class FloatUrlParameterConverter:
    """Django URL path converter for (optionally signed) decimal numbers.

    Registered below under the name 'float' and used by routes such as the
    world-region endpoint (``<float:left>/<float:right>/...``).

    The pattern accepts an optional leading minus sign, one or more digits,
    and an optional fractional part.  The previous pattern required at least
    two digits in total, so single-digit values such as ``5`` or ``0`` never
    matched; this pattern accepts them while still matching everything the
    old pattern did (backward compatible superset).
    """

    regex = r'-?[0-9]+(?:\.[0-9]+)?'

    def to_python(self, value):
        """Convert the matched URL fragment into a float for the view."""
        return float(value)

    def to_url(self, value):
        """Convert a Python value back into a URL fragment for reverse()."""
        return str(value)
# Make the custom converter available as <float:...> in the path() routes below.
register_converter(FloatUrlParameterConverter, 'float')
# Route table for the geodata app: HTML pages first, then the REST API
# (search, common/data endpoints, and geoprocessing endpoints).
urlpatterns = [
    # Pages
    path(r'', views.SpatialEntriesListView.as_view(), name='index'),
    path(r'geodata/raster/', views.RasterMetaEntriesListView.as_view(), name='raster-search'),
    path(
        'geodata/statistics',
        views.StatisticsView.as_view(),
        name='statistics',
    ),
    path(
        'geodata/spatial_entries/<int:pk>/',
        views.spatial_entry_redirect_view,
        name='spatial-entry-detail',
    ),
    path(
        'geodata/raster/<int:pk>/',
        views.RasterEntryDetailView.as_view(),
        name='raster-entry-detail',
    ),
    path(
        'geodata/fmv/<int:pk>/',
        views.FMVEntryDetailView.as_view(),
        name='fmv-entry-detail',
    ),
    path(
        'geodata/geometry/<int:pk>/',
        views.GeometryEntryDetailView.as_view(),
        name='geometry-entry-detail',
    ),
    path(
        'geodata/point_cloud/<int:pk>/',
        views.PointCloudEntryDetailView.as_view(),
        name='point-cloud-entry-detail',
    ),
    path(
        'geodata/imagery/image_set/<int:pk>/',
        views.ImageSetSpatialDetailView.as_view(),
        name='image-set-spatial-detail',
    ),
    #############
    # Search
    path('api/geosearch', api.search.SearchSpatialEntryView.as_view()),
    path('api/geosearch/raster', api.search.SearchRasterMetaEntrySTACView.as_view()),
    #############
    # Other
    path(
        'api/geodata/status/<model>/<int:pk>',
        api.download.get_status,
        name='get-status',
    ),
    path(
        'api/geodata/common/spatial_entry/<int:spatial_id>',
        api.get.GetSpatialEntry.as_view(),
        name='spatial-entry',
    ),
    path(
        'api/geodata/common/spatial_entry/<int:spatial_id>/footprint',
        api.get.GetSpatialEntryFootprint.as_view(),
        name='spatial-entry-footprint',
    ),
    path(
        'api/geodata/common/checksum_file/<int:pk>',
        api.get.GetChecksumFile.as_view(),
        name='checksum-file',
    ),
    path(
        'api/geodata/common/checksum_file/<int:pk>/data',
        api.download.download_checksum_file,
        name='checksum-file-data',
    ),
    path(
        'api/geodata/geometry/<int:pk>',
        api.get.GetGeometryEntry.as_view(),
        name='geometry-entry',
    ),
    path(
        'api/geodata/geometry/<int:pk>/data',
        api.get.GetGeometryEntryData.as_view(),
        name='geometry-entry-data',
    ),
    path(
        'api/geodata/imagery/<int:pk>',
        api.get.GetImageEntry.as_view(),
        name='image-entry',
    ),
    path(
        'api/geodata/imagery/<int:pk>/data',
        api.download.download_image_entry_file,
        name='image-entry-data',
    ),
    path(
        'api/geodata/imagery/image_set/<int:pk>',
        api.get.GetImageSet.as_view(),
        name='image-set',
    ),
    path(
        'api/geodata/imagery/raster/<int:pk>',
        api.get.GetRasterMetaEntry.as_view(),
        name='raster-meta-entry',
    ),
    path(
        'api/geodata/imagery/raster/<int:pk>/stac',
        api.get.GetRasterMetaEntrySTAC.as_view(),
        name='raster-meta-entry-stac',
    ),
    path(
        'api/geodata/imagery/raster/stac',
        api.post.CreateRasterSTAC.as_view(),
        name='raster-meta-entry-stac-post',
    ),
    path(
        'api/geodata/fmv/<int:pk>',
        api.get.GetFMVEntry.as_view(),
        name='fmv-entry',
    ),
    path(
        'api/geodata/fmv/<int:pk>/data',
        api.get.GetFMVDataEntry.as_view(),
        name='fmv-entry-data',
    ),
    path(
        'api/geodata/point_cloud/<int:pk>',
        api.get.GetPointCloudEntry.as_view(),
        name='point-cloud-entry',
    ),
    path(
        'api/geodata/point_cloud/<int:pk>/base64',
        api.get.GetPointCloudEntryData.as_view(),
        name='point-cloud-entry-data',
    ),
    #############
    # Geoprocessing
    path(
        'api/geoprocess/imagery/<int:pk>/tiles',
        api.tiles.TileMetadataView.as_view(),
        name='image-tile-metadata',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/tiles/internal',
        api.tiles.TileInternalMetadataView.as_view(),
        name='image-tile-internal-metadata',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/tiles/<int:z>/<int:x>/<int:y>.png',
        api.tiles.TileView.as_view(),
        name='image-tiles',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/tiles/region/world/<float:left>/<float:right>/<float:bottom>/<float:top>/region.tif',
        api.tiles.TileRegionView.as_view(),
        name='image-region',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/tiles/region/pixel/<int:left>/<int:right>/<int:bottom>/<int:top>/region.tif',
        api.tiles.TileRegionPixelView.as_view(),
        name='image-region-pixel',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/tiles/<int:z>/<int:x>/<int:y>/corners',
        api.tiles.TileCornersView.as_view(),
        name='image-tile-corners',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/thumbnail',
        api.tiles.TileThumnailView.as_view(),
        name='image-thumbnail',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/bands',
        api.tiles.TileBandInfoView.as_view(),
        name='image-bands',
    ),
    path(
        'api/geoprocess/imagery/<int:pk>/bands/<int:band>',
        api.tiles.TileSingleBandInfoView.as_view(),
        name='image-bands-single',
    ),
    path('api/geoprocess/imagery/cog', api.post.CreateConvertedImageFile.as_view()),
    path(
        'api/geoprocess/imagery/cog/<int:pk>',
        api.get.GetConvertedImageStatus.as_view(),
        name='cog',
    ),
    path(
        'api/geoprocess/imagery/cog/<int:pk>/data',
        api.download.download_cog_file,
        name='cog-data',
    ),
    path(
        'api/geoprocess/imagery/subsample',
        api.post.CreateSubsampledImage.as_view(),
    ),
    path(
        'api/geoprocess/imagery/subsample/<int:pk>',
        api.get.GetSubsampledImage.as_view(),
        name='subsampled',
    ),
    path(
        'api/geoprocess/imagery/subsample/<int:pk>/status',
        api.download.get_status_subsampled_image,
        name='subsampled-status',
    ),
]
| 29.234234 | 126 | 0.591217 |
3f56ae56547e567a26d8b61a53f43b56e2364d30
| 23 |
py
|
Python
|
ytmusicapi/_version.py
|
BruceZhang1993/ytmusicapi
|
909d4d367a4c995a96a56e1d82d7d9a6a4471430
|
[
"MIT"
] | 689 |
2020-02-19T02:09:48.000Z
|
2022-03-31T18:32:14.000Z
|
ytmusicapi/_version.py
|
BruceZhang1993/ytmusicapi
|
909d4d367a4c995a96a56e1d82d7d9a6a4471430
|
[
"MIT"
] | 240 |
2020-04-03T08:05:43.000Z
|
2022-02-22T13:47:42.000Z
|
ytmusicapi/_version.py
|
BruceZhang1993/ytmusicapi
|
909d4d367a4c995a96a56e1d82d7d9a6a4471430
|
[
"MIT"
] | 123 |
2020-04-01T00:16:51.000Z
|
2022-03-23T00:54:54.000Z
|
__version__ = "0.19.5"  # single source of truth for the package version
| 11.5 | 22 | 0.652174 |
0029cb3bf86c69d9577ffd7f54df5b05888954b6
| 1,420 |
py
|
Python
|
test/basicpubsub/backend.py
|
arnoschn/AutobahnJS
|
7e4b04f2062eee6eab01bda0b5850f54f948481d
|
[
"MIT"
] | null | null | null |
test/basicpubsub/backend.py
|
arnoschn/AutobahnJS
|
7e4b04f2062eee6eab01bda0b5850f54f948481d
|
[
"MIT"
] | null | null | null |
test/basicpubsub/backend.py
|
arnoschn/AutobahnJS
|
7e4b04f2062eee6eab01bda0b5850f54f948481d
|
[
"MIT"
] | null | null | null |
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
    """
    An application component that publishes an ever-increasing counter
    to the topic 'com.myapp.topic1', once per second.
    """

    def __init__(self, realm="realm1"):
        ApplicationSession.__init__(self)
        self._realm = realm

    def onConnect(self):
        # Join the configured realm as soon as the transport is up.
        self.join(self._realm)

    @inlineCallbacks
    def onJoin(self, details):
        # Publish 0, 1, 2, ... forever, one event per second.
        tick = 0
        while True:
            self.publish('com.myapp.topic1', tick)
            tick += 1
            yield sleep(1)
| 29.583333 | 79 | 0.628169 |
b2bb1e6093097ea5db2fea73786ede54c23d2f88
| 21,724 |
py
|
Python
|
3rdparty/pytorch/test/test_indexing.py
|
WoodoLee/TorchCraft
|
999f68aab9e7d50ed3ae138297226dc95fefc458
|
[
"MIT"
] | null | null | null |
3rdparty/pytorch/test/test_indexing.py
|
WoodoLee/TorchCraft
|
999f68aab9e7d50ed3ae138297226dc95fefc458
|
[
"MIT"
] | null | null | null |
3rdparty/pytorch/test/test_indexing.py
|
WoodoLee/TorchCraft
|
999f68aab9e7d50ed3ae138297226dc95fefc458
|
[
"MIT"
] | null | null | null |
from common_utils import TestCase, run_tests
import torch
import warnings
from torch import tensor
import unittest
class TestIndexing(TestCase):
    """Tests for tensor indexing: ints, slices, None/newaxis, ellipsis,
    byte masks, and integer-tensor (fancy) indexing, for both getitem
    and setitem paths."""
    def test_single_int(self):
        v = torch.randn(5, 7, 3)
        self.assertEqual(v[4].shape, (7, 3))
    def test_multiple_int(self):
        v = torch.randn(5, 7, 3)
        self.assertEqual(v[4].shape, (7, 3))
        self.assertEqual(v[4, :, 1].shape, (7,))
    def test_none(self):
        v = torch.randn(5, 7, 3)
        self.assertEqual(v[None].shape, (1, 5, 7, 3))
        self.assertEqual(v[:, None].shape, (5, 1, 7, 3))
        self.assertEqual(v[:, None, None].shape, (5, 1, 1, 7, 3))
        self.assertEqual(v[..., None].shape, (5, 7, 3, 1))
    def test_step(self):
        v = torch.arange(10)
        self.assertEqual(v[::1], v)
        self.assertEqual(v[::2].tolist(), [0, 2, 4, 6, 8])
        self.assertEqual(v[::3].tolist(), [0, 3, 6, 9])
        self.assertEqual(v[::11].tolist(), [0])
        self.assertEqual(v[1:6:2].tolist(), [1, 3, 5])
    def test_step_assignment(self):
        v = torch.zeros(4, 4)
        v[0, 1::2] = torch.tensor([3., 4.])
        self.assertEqual(v[0].tolist(), [0, 3, 0, 4])
        self.assertEqual(v[1:].sum(), 0)
    def test_byte_mask(self):
        v = torch.randn(5, 7, 3)
        mask = torch.ByteTensor([1, 0, 1, 1, 0])
        self.assertEqual(v[mask].shape, (3, 7, 3))
        self.assertEqual(v[mask], torch.stack([v[0], v[2], v[3]]))
        v = torch.tensor([1.])
        self.assertEqual(v[v == 0], torch.tensor([]))
    def test_byte_mask_accumulate(self):
        # an all-zero mask with accumulate=True must leave the tensor unchanged
        mask = torch.zeros(size=(10, ), dtype=torch.uint8)
        y = torch.ones(size=(10, 10))
        y.index_put_((mask, ), y[mask], accumulate=True)
        self.assertEqual(y, torch.ones(size=(10, 10)))
    def test_multiple_byte_mask(self):
        v = torch.randn(5, 7, 3)
        # note: these broadcast together and are transposed to the first dim
        mask1 = torch.ByteTensor([1, 0, 1, 1, 0])
        mask2 = torch.ByteTensor([1, 1, 1])
        self.assertEqual(v[mask1, :, mask2].shape, (3, 7))
    def test_byte_mask2d(self):
        v = torch.randn(5, 7, 3)
        c = torch.randn(5, 7)
        num_ones = (c > 0).sum()
        r = v[c > 0]
        self.assertEqual(r.shape, (num_ones, 3))
    def test_int_indices(self):
        v = torch.randn(5, 7, 3)
        self.assertEqual(v[[0, 4, 2]].shape, (3, 7, 3))
        self.assertEqual(v[:, [0, 4, 2]].shape, (5, 3, 3))
        self.assertEqual(v[:, [[0, 1], [4, 3]]].shape, (5, 2, 2, 3))
    def test_int_indices2d(self):
        # From the NumPy indexing example
        x = torch.arange(0, 12).view(4, 3)
        rows = torch.tensor([[0, 0], [3, 3]])
        columns = torch.tensor([[0, 2], [0, 2]])
        self.assertEqual(x[rows, columns].tolist(), [[0, 2], [9, 11]])
    def test_int_indices_broadcast(self):
        # From the NumPy indexing example
        x = torch.arange(0, 12).view(4, 3)
        rows = torch.tensor([0, 3])
        columns = torch.tensor([0, 2])
        result = x[rows[:, None], columns]
        self.assertEqual(result.tolist(), [[0, 2], [9, 11]])
    def test_empty_index(self):
        x = torch.arange(0, 12).view(4, 3)
        idx = torch.tensor([], dtype=torch.long)
        self.assertEqual(x[idx].numel(), 0)
        # empty assignment should have no effect but not throw an exception
        y = x.clone()
        y[idx] = -1
        self.assertEqual(x, y)
        mask = torch.zeros(4, 3).byte()
        y[mask] = -1
        self.assertEqual(x, y)
    def test_empty_ndim_index(self):
        devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
        for device in devices:
            x = torch.randn(5, device=device)
            self.assertEqual(torch.empty(0, 2, device=device), x[torch.empty(0, 2, dtype=torch.int64, device=device)])
            x = torch.randn(2, 3, 4, 5, device=device)
            self.assertEqual(torch.empty(2, 0, 6, 4, 5, device=device),
                             x[:, torch.empty(0, 6, dtype=torch.int64, device=device)])
        x = torch.empty(10, 0)
        self.assertEqual(x[[1, 2]].shape, (2, 0))
        self.assertEqual(x[[], []].shape, (0,))
        with self.assertRaisesRegex(RuntimeError, 'for dim with size 0'):
            x[:, [0, 1]]
    def test_empty_ndim_index_bool(self):
        devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
        for device in devices:
            x = torch.randn(5, device=device)
            self.assertRaises(IndexError, lambda: x[torch.empty(0, 2, dtype=torch.uint8, device=device)])
    def test_empty_slice(self):
        devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']
        for device in devices:
            x = torch.randn(2, 3, 4, 5, device=device)
            y = x[:, :, :, 1]
            z = y[:, 1:1, :]
            self.assertEqual((2, 0, 4), z.shape)
            # this isn't technically necessary, but matches NumPy stride calculations.
            self.assertEqual((60, 20, 5), z.stride())
            self.assertTrue(z.is_contiguous())
    def test_index_getitem_copy_bools_slices(self):
        true = torch.tensor(1, dtype=torch.uint8)
        false = torch.tensor(0, dtype=torch.uint8)
        tensors = [torch.randn(2, 3), torch.tensor(3)]
        for a in tensors:
            # boolean indexing copies; None/ellipsis index the same storage
            self.assertNotEqual(a.data_ptr(), a[True].data_ptr())
            self.assertEqual(torch.empty(0, *a.shape), a[False])
            self.assertNotEqual(a.data_ptr(), a[true].data_ptr())
            self.assertEqual(torch.empty(0, *a.shape), a[false])
            self.assertEqual(a.data_ptr(), a[None].data_ptr())
            self.assertEqual(a.data_ptr(), a[...].data_ptr())
    def test_index_setitem_bools_slices(self):
        true = torch.tensor(1, dtype=torch.uint8)
        false = torch.tensor(0, dtype=torch.uint8)
        tensors = [torch.randn(2, 3), torch.tensor(3)]
        for a in tensors:
            # prefix with a 1,1, to ensure we are compatible with numpy which cuts off prefix 1s
            # (some of these ops already prefix a 1 to the size)
            neg_ones = torch.ones_like(a) * -1
            neg_ones_expanded = neg_ones.unsqueeze(0).unsqueeze(0)
            a[True] = neg_ones_expanded
            self.assertEqual(a, neg_ones)
            a[False] = 5
            self.assertEqual(a, neg_ones)
            a[true] = neg_ones_expanded * 2
            self.assertEqual(a, neg_ones * 2)
            a[false] = 5
            self.assertEqual(a, neg_ones * 2)
            a[None] = neg_ones_expanded * 3
            self.assertEqual(a, neg_ones * 3)
            a[...] = neg_ones_expanded * 4
            self.assertEqual(a, neg_ones * 4)
            if a.dim() == 0:
                with self.assertRaises(RuntimeError):
                    a[:] = neg_ones_expanded * 5
    def test_setitem_expansion_error(self):
        true = torch.tensor(True)
        a = torch.randn(2, 3)
        # check prefix with non-1s doesn't work
        a_expanded = a.expand(torch.Size([5, 1]) + a.size())
        with self.assertRaises(RuntimeError):
            a[True] = a_expanded
        with self.assertRaises(RuntimeError):
            a[true] = a_expanded
    def test_getitem_scalars(self):
        zero = torch.tensor(0, dtype=torch.int64)
        one = torch.tensor(1, dtype=torch.int64)
        # non-scalar indexed with scalars
        a = torch.randn(2, 3)
        self.assertEqual(a[0], a[zero])
        self.assertEqual(a[0][1], a[zero][one])
        self.assertEqual(a[0, 1], a[zero, one])
        self.assertEqual(a[0, one], a[zero, 1])
        # indexing by a scalar should slice (not copy)
        self.assertEqual(a[0, 1].data_ptr(), a[zero, one].data_ptr())
        self.assertEqual(a[1].data_ptr(), a[one.int()].data_ptr())
        self.assertEqual(a[1].data_ptr(), a[one.short()].data_ptr())
        # scalar indexed with scalar
        r = torch.randn(())
        with self.assertRaises(RuntimeError):
            r[:]
        with self.assertRaises(IndexError):
            r[zero]
        self.assertEqual(r, r[...])
    def test_setitem_scalars(self):
        zero = torch.tensor(0, dtype=torch.int64)
        # non-scalar indexed with scalars
        a = torch.randn(2, 3)
        a_set_with_number = a.clone()
        a_set_with_scalar = a.clone()
        b = torch.randn(3)
        a_set_with_number[0] = b
        a_set_with_scalar[zero] = b
        self.assertEqual(a_set_with_number, a_set_with_scalar)
        a[1, zero] = 7.7
        self.assertEqual(7.7, a[1, 0])
        # scalar indexed with scalars
        r = torch.randn(())
        with self.assertRaises(RuntimeError):
            r[:] = 8.8
        with self.assertRaises(IndexError):
            r[zero] = 8.8
        r[...] = 9.9
        self.assertEqual(9.9, r)
    def test_basic_advanced_combined(self):
        # From the NumPy indexing example
        x = torch.arange(0, 12).view(4, 3)
        self.assertEqual(x[1:2, 1:3], x[1:2, [1, 2]])
        self.assertEqual(x[1:2, 1:3].tolist(), [[4, 5]])
        # Check that it is a copy
        unmodified = x.clone()
        x[1:2, [1, 2]].zero_()
        self.assertEqual(x, unmodified)
        # But assignment should modify the original
        unmodified = x.clone()
        x[1:2, [1, 2]] = 0
        self.assertNotEqual(x, unmodified)
    def test_int_assignment(self):
        x = torch.arange(0, 4).view(2, 2)
        x[1] = 5
        self.assertEqual(x.tolist(), [[0, 1], [5, 5]])
        x = torch.arange(0, 4).view(2, 2)
        x[1] = torch.arange(5, 7)
        self.assertEqual(x.tolist(), [[0, 1], [5, 6]])
    def test_byte_tensor_assignment(self):
        x = torch.arange(0., 16).view(4, 4)
        b = torch.ByteTensor([True, False, True, False])
        value = torch.tensor([3., 4., 5., 6.])
        x[b] = value
        self.assertEqual(x[0], value)
        self.assertEqual(x[1], torch.arange(4, 8))
        self.assertEqual(x[2], value)
        self.assertEqual(x[3], torch.arange(12, 16))
    def test_variable_slicing(self):
        x = torch.arange(0, 16).view(4, 4)
        indices = torch.IntTensor([0, 1])
        i, j = indices
        self.assertEqual(x[i:j], x[0:1])
    def test_ellipsis_tensor(self):
        x = torch.arange(0, 9).view(3, 3)
        idx = torch.tensor([0, 2])
        self.assertEqual(x[..., idx].tolist(), [[0, 2],
                                                [3, 5],
                                                [6, 8]])
        self.assertEqual(x[idx, ...].tolist(), [[0, 1, 2],
                                                [6, 7, 8]])
    def test_invalid_index(self):
        x = torch.arange(0, 16).view(4, 4)
        self.assertRaisesRegex(TypeError, 'slice indices', lambda: x["0":"1"])
    def test_zero_dim_index(self):
        x = torch.tensor(10)
        self.assertEqual(x, x.item())
        def runner():
            print(x[0])
            return x[0]
        self.assertRaisesRegex(IndexError, 'invalid index', runner)
# The tests below are from NumPy test_indexing.py with some modifications to
# make them compatible with PyTorch. It's licensed under the BDS license below:
#
# Copyright (c) 2005-2017, NumPy Developers.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class NumpyTests(TestCase):
    """Indexing tests ported from NumPy's test_indexing.py (BSD licensed,
    see the notice above), lightly adapted for PyTorch semantics."""
    def test_index_no_floats(self):
        a = torch.tensor([[[5.]]])
        self.assertRaises(IndexError, lambda: a[0.0])
        self.assertRaises(IndexError, lambda: a[0, 0.0])
        self.assertRaises(IndexError, lambda: a[0.0, 0])
        self.assertRaises(IndexError, lambda: a[0.0, :])
        self.assertRaises(IndexError, lambda: a[:, 0.0])
        self.assertRaises(IndexError, lambda: a[:, 0.0, :])
        self.assertRaises(IndexError, lambda: a[0.0, :, :])
        self.assertRaises(IndexError, lambda: a[0, 0, 0.0])
        self.assertRaises(IndexError, lambda: a[0.0, 0, 0])
        self.assertRaises(IndexError, lambda: a[0, 0.0, 0])
        self.assertRaises(IndexError, lambda: a[-1.4])
        self.assertRaises(IndexError, lambda: a[0, -1.4])
        self.assertRaises(IndexError, lambda: a[-1.4, 0])
        self.assertRaises(IndexError, lambda: a[-1.4, :])
        self.assertRaises(IndexError, lambda: a[:, -1.4])
        self.assertRaises(IndexError, lambda: a[:, -1.4, :])
        self.assertRaises(IndexError, lambda: a[-1.4, :, :])
        self.assertRaises(IndexError, lambda: a[0, 0, -1.4])
        self.assertRaises(IndexError, lambda: a[-1.4, 0, 0])
        self.assertRaises(IndexError, lambda: a[0, -1.4, 0])
        # self.assertRaises(IndexError, lambda: a[0.0:, 0.0])
        # self.assertRaises(IndexError, lambda: a[0.0:, 0.0,:])
    def test_none_index(self):
        # `None` index adds newaxis
        a = tensor([1, 2, 3])
        self.assertEqual(a[None].dim(), a.dim() + 1)
    def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = tensor([1, 2, 3])
        self.assertEqual(a[()], a)
        self.assertEqual(a[()].data_ptr(), a.data_ptr())
    def test_empty_fancy_index(self):
        # Empty list index creates an empty array
        a = tensor([1, 2, 3])
        self.assertEqual(a[[]], torch.tensor([]))
        b = tensor([]).long()
        self.assertEqual(a[[]], torch.tensor([], dtype=torch.long))
        b = tensor([]).float()
        self.assertRaises(RuntimeError, lambda: a[b])
    def test_ellipsis_index(self):
        a = tensor([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
        self.assertIsNot(a[...], a)
        self.assertEqual(a[...], a)
        # `a[...]` was `a` in numpy <1.9.
        self.assertEqual(a[...].data_ptr(), a.data_ptr())
        # Slicing with ellipsis can skip an
        # arbitrary number of dimensions
        self.assertEqual(a[0, ...], a[0])
        self.assertEqual(a[0, ...], a[0, :])
        self.assertEqual(a[..., 0], a[:, 0])
        # In NumPy, slicing with ellipsis results in a 0-dim array. In PyTorch
        # we don't have separate 0-dim arrays and scalars.
        self.assertEqual(a[0, ..., 1], torch.tensor(2))
        # Assignment with `(Ellipsis,)` on 0-d arrays
        b = torch.tensor(1)
        b[(Ellipsis,)] = 2
        self.assertEqual(b, 2)
    def test_single_int_index(self):
        # Single integer index selects one row
        a = tensor([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
        self.assertEqual(a[0], [1, 2, 3])
        self.assertEqual(a[-1], [7, 8, 9])
        # Index out of bounds produces IndexError
        self.assertRaises(IndexError, a.__getitem__, 1 << 30)
        # Index overflow produces Exception NB: different exception type
        self.assertRaises(Exception, a.__getitem__, 1 << 64)
    def test_single_bool_index(self):
        # Single boolean index
        a = tensor([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
        self.assertEqual(a[True], a[None])
        self.assertEqual(a[False], a[None][0:0])
    def test_boolean_shape_mismatch(self):
        arr = torch.ones((5, 4, 3))
        # TODO: prefer IndexError
        index = tensor([True])
        self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
        index = tensor([False] * 6)
        self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
        index = torch.ByteTensor(4, 4).zero_()
        self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[index])
        self.assertRaisesRegex(RuntimeError, 'mask', lambda: arr[(slice(None), index)])
    def test_boolean_indexing_onedim(self):
        # Indexing a 2-dimensional array with
        # boolean array of length one
        a = tensor([[0., 0., 0.]])
        b = tensor([True])
        self.assertEqual(a[b], a)
        # boolean assignment
        a[b] = 1.
        self.assertEqual(a, tensor([[1., 1., 1.]]))
    def test_boolean_assignment_value_mismatch(self):
        # A boolean assignment should fail when the shape of the values
        # cannot be broadcast to the subscription. (see also gh-3458)
        a = torch.arange(0, 4)
        def f(a, v):
            a[a > -1] = tensor(v)
        self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [])
        self.assertRaisesRegex(Exception, 'shape mismatch', f, a, [1, 2, 3])
        self.assertRaisesRegex(Exception, 'shape mismatch', f, a[:1], [1, 2, 3])
    def test_boolean_indexing_twodim(self):
        # Indexing a 2-dimensional array with
        # 2-dimensional boolean array
        a = tensor([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
        b = tensor([[True, False, True],
                    [False, True, False],
                    [True, False, True]])
        self.assertEqual(a[b], tensor([1, 3, 5, 7, 9]))
        self.assertEqual(a[b[1]], tensor([[4, 5, 6]]))
        self.assertEqual(a[b[0]], a[b[2]])
        # boolean assignment
        a[b] = 0
        self.assertEqual(a, tensor([[0, 2, 0],
                                    [4, 0, 6],
                                    [0, 8, 0]]))
    def test_boolean_indexing_weirdness(self):
        # Weird boolean indexing things
        a = torch.ones((2, 3, 4))
        self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
        self.assertEqual(torch.ones(1, 2), a[True, [0, 1], True, True, [1], [[2]]])
        self.assertRaises(RuntimeError, lambda: a[False, [0, 1], ...])
    def test_boolean_indexing_weirdness_tensors(self):
        # Weird boolean indexing things
        false = torch.tensor(False)
        true = torch.tensor(True)
        a = torch.ones((2, 3, 4))
        self.assertEqual((0, 2, 3, 4), a[False, True, ...].shape)
        self.assertEqual(torch.ones(1, 2), a[true, [0, 1], true, true, [1], [[2]]])
        self.assertRaises(RuntimeError, lambda: a[false, [0, 1], ...])
    def test_boolean_indexing_alldims(self):
        true = torch.tensor(True)
        a = torch.ones((2, 3))
        self.assertEqual((1, 2, 3), a[True, True].shape)
        self.assertEqual((1, 2, 3), a[true, true].shape)
    def test_boolean_list_indexing(self):
        # Indexing a 2-dimensional array with
        # boolean lists
        a = tensor([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]])
        b = [True, False, False]
        c = [True, True, False]
        self.assertEqual(a[b], tensor([[1, 2, 3]]))
        self.assertEqual(a[b, b], tensor([1]))
        self.assertEqual(a[c], tensor([[1, 2, 3], [4, 5, 6]]))
        self.assertEqual(a[c, c], tensor([1, 5]))
    def test_everything_returns_views(self):
        # Before `...` would return a itself.
        a = tensor([5])
        self.assertIsNot(a, a[()])
        self.assertIsNot(a, a[...])
        self.assertIsNot(a, a[:])
    def test_broaderrors_indexing(self):
        a = torch.zeros(5, 5)
        self.assertRaisesRegex(RuntimeError, 'shape mismatch', a.__getitem__, ([0, 1], [0, 1, 2]))
        self.assertRaisesRegex(RuntimeError, 'shape mismatch', a.__setitem__, ([0, 1], [0, 1, 2]), 0)
    def test_trivial_fancy_out_of_bounds(self):
        a = torch.zeros(5)
        ind = torch.ones(20, dtype=torch.int64)
        if a.is_cuda:
            raise unittest.SkipTest('CUDA asserts instead of raising an exception')
        ind[-1] = 10
        self.assertRaises(RuntimeError, a.__getitem__, ind)
        self.assertRaises(RuntimeError, a.__setitem__, ind, 0)
        ind = torch.ones(20, dtype=torch.int64)
        ind[0] = 11
        self.assertRaises(RuntimeError, a.__getitem__, ind)
        self.assertRaises(RuntimeError, a.__setitem__, ind, 0)
    def test_index_is_larger(self):
        # Simple case of fancy index broadcasting of the index.
        a = torch.zeros((5, 5))
        a[[[0], [1], [2]], [0, 1, 2]] = tensor([2., 3., 4.])
        self.assertTrue((a[:3, :3] == tensor([2., 3., 4.])).all())
    def test_broadcast_subspace(self):
        a = torch.zeros((100, 100))
        v = torch.arange(0., 100)[:, None]
        b = torch.arange(99, -1, -1).long()
        a[b] = v
        expected = b.double().unsqueeze(1).expand(100, 100)
        self.assertEqual(a, expected)
# Allow running this test module directly (outside of a discovery run).
if __name__ == '__main__':
    run_tests()
| 38.381625 | 118 | 0.568358 |
6cba07b99d4ad8fe81745302f6fbd4c3f5a6ece6
| 16,484 |
py
|
Python
|
dojango/management/commands/dojobuild.py
|
ofirr/dojango
|
9856ca4a0194f59a5fe41cc32739e36eaa1f4bc3
|
[
"BSD-3-Clause"
] | 1 |
2016-12-27T09:46:02.000Z
|
2016-12-27T09:46:02.000Z
|
dojango/management/commands/dojobuild.py
|
ofirr/dojango
|
9856ca4a0194f59a5fe41cc32739e36eaa1f4bc3
|
[
"BSD-3-Clause"
] | null | null | null |
dojango/management/commands/dojobuild.py
|
ofirr/dojango
|
9856ca4a0194f59a5fe41cc32739e36eaa1f4bc3
|
[
"BSD-3-Clause"
] | null | null | null |
from optparse import make_option
import os
import re
import shutil
import subprocess # since python 2.4
import sys
from dojango.conf import settings
from dojango.version import version_tuple
try:
from django.core.management.base import BaseCommand, CommandError
except ImportError:
# Fake BaseCommand out so imports on django 0.96 don't fail.
BaseCommand = object
class CommandError(Exception):
pass
class Command(BaseCommand):
'''This command is used to create your own dojo build. To start a build, you just
have to type:
./manage.py dojobuild
in your django project path. With this call, the default build profile "dojango" is used
and dojango.profile.js will act as its dojo build configuration. You can also add the
option --build_version=dev1.1.1 (for example) to mark the build with it.
If you want to call a specific build profile from DOJO_BUILD_PROFILES, you just have to
append the profile name to this commandline call:
./manage.py dojobuild profilename
'''
option_list = BaseCommand.option_list + (
make_option('--build_version', dest='build_version',
help='Set the version of the build release (e.g. dojango_1.1.1).'),
make_option('--minify', dest='minify', action="store_true", default=False,
help='Does a dojo mini build (mainly removing unneeded files (tests/templates/...)'),
make_option('--minify_extreme', dest='minify_extreme', action="store_true", default=False,
help='Does a dojo extreme-mini build (keeps only what is defined in build profile and all media files)'),
make_option('--prepare_zipserve', dest='prepare_zipserve', action="store_true", default=False,
help='Zips everything you have built, so it can be deployed to Google AppEngine'),
)
help = "Builds a dojo release."
args = '[dojo build profile name]'
dojo_base_dir = None
dojo_release_dir = None
skip_files = None
def handle(self, *args, **options):
if len(args)==0:
# with no param, we use the default profile, that is defined in the settings
profile_name = settings.DOJO_BUILD_PROFILE
else:
profile_name = args[0]
profile = self._get_profile(profile_name)
used_src_version = profile['used_src_version'] % {'DOJO_BUILD_VERSION': settings.DOJO_BUILD_VERSION} # no dependencies to project's settings.py file!
# used by minify_extreme!
self.skip_files = profile.get("minify_extreme_skip_files", ())
self.dojo_base_dir = "%(dojo_root)s/%(version)s" % \
{'dojo_root':settings.BASE_DOJO_ROOT,
'version':used_src_version}
# does the defined dojo-directory exist?
util_base_dir = "%(dojo_base_dir)s/util" % {'dojo_base_dir':self.dojo_base_dir}
if not os.path.exists(util_base_dir):
raise CommandError('Put the the dojo source files (version \'%(version)s\') in the folder \'%(folder)s/%(version)s\' or set a different version in settings.DOJANGO_DOJO_BUILD_VERSION' % \
{'version':used_src_version,
'folder':settings.BASE_DOJO_ROOT})
# check, if java is installed
try:
# ignoring output of the java call
subprocess.call(settings.DOJO_BUILD_JAVA_EXEC, stdout=subprocess.PIPE) # will work with python >= 2.4
except:
raise CommandError('Please install java. You need it for building dojo.')
buildscript_dir = os.path.abspath('%s/buildscripts' % util_base_dir)
if version_tuple(settings.DOJO_BUILD_USED_VERSION) < (1,2,0):
executable = '%(java_exec)s -jar ../shrinksafe/custom_rhino.jar build.js' % \
{'java_exec':settings.DOJO_BUILD_JAVA_EXEC}
else:
# use the new build command line call!
if(os.path.sep == "\\"):
executable = 'build.bat'
else:
executable = './build.sh'
# force executable rights!
os.chmod(os.path.join(buildscript_dir, 'build.sh'), 0o755)
# use the passed version for building
version = options.get('build_version', None)
if not version:
# if no option --build_version was passed, we use the default build version
version = profile['build_version'] % {'DOJO_BUILD_VERSION': settings.DOJO_BUILD_VERSION} # no dependencies to project's settings.py file!
# we add the version to our destination base path
self.dojo_release_dir = '%(base_path)s/%(version)s' % {
'base_path':profile['base_root'] % {'BASE_MEDIA_ROOT':settings.BASE_MEDIA_ROOT},
'version':version} # we don't want to have a dependancy to the project's settings file!
release_dir = os.path.abspath(os.path.join(self.dojo_release_dir, "../"))
# the build command handling is so different between the versions!
# sometimes we need to add /, sometimes not :-(
if version_tuple(settings.DOJO_BUILD_USED_VERSION) < (1,2,0):
release_dir = release_dir + os.path.sep
# setting up the build command
build_addons = ""
if version_tuple(settings.DOJO_BUILD_USED_VERSION) >= (1,2,0):
# since version 1.2.0 there is an additional commandline option that does the mini build (solved within js!)
build_addons = "mini=true"
exe_command = 'cd "%(buildscript_dir)s" && %(executable)s version=%(version)s releaseName="%(version)s" releaseDir="%(release_dir)s" %(options)s %(build_addons)s' % \
{'buildscript_dir':buildscript_dir,
'executable':executable,
'version':version,
'release_dir':release_dir,
'options':profile['options'] % {'BASE_MEDIA_ROOT':settings.BASE_MEDIA_ROOT},
'build_addons':build_addons}
# print exe_command
minify = options['minify']
minify_extreme = options['minify_extreme']
prepare_zipserve = options['prepare_zipserve']
if (version_tuple(settings.DOJO_BUILD_USED_VERSION) < (1,2,0)) and (minify or minify_extreme):
self._dojo_mini_before_build()
if sys.platform == 'win32': # fixing issue #39, if dojango is installed on a different drive
exe_command = os.path.splitdrive(buildscript_dir)[0] + ' && ' + exe_command
# do the build
exit_code = os.system(exe_command)
if exit_code: # != 0
sys.exit(1) # dojobuild exits because of shrinksafe error
if version_tuple(settings.DOJO_BUILD_USED_VERSION) < (1,2,0):
if minify or minify_extreme:
self._dojo_mini_after_build()
if minify_extreme:
self._dojo_mini_extreme()
if prepare_zipserve:
self._dojo_prepare_zipserve()
def _get_profile(self, name):
default_profile_settings = settings.DOJO_BUILD_PROFILES_DEFAULT
try:
profile = settings.DOJO_BUILD_PROFILES[name]
# mixing in the default settings for the build profiles!
default_profile_settings.update(profile)
return default_profile_settings
except KeyError:
raise CommandError('The profile \'%s\' does not exist in DOJO_BUILD_PROFILES' % name)
def _dojo_mini_before_build(self):
# FIXME: refs #6616 - could be able to set a global copyright file and null out build_release.txt
shutil.move("%s/util/buildscripts/copyright.txt" % self.dojo_base_dir, "%s/util/buildscripts/_copyright.txt" % self.dojo_base_dir)
if not os.path.exists("%s/util/buildscripts/copyright_mini.txt" % self.dojo_base_dir):
f = open("%s/util/buildscripts/copyright.txt" % self.dojo_base_dir, 'w')
f.write('''/*
Copyright (c) 2004-2008, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/''')
f.close()
else:
shutil.copyfile("%s/util/buildscripts/copyright_mini.txt" % self.dojo_base_dir, "%s/util/buildscripts/copyright.txt" % self.dojo_base_dir)
shutil.move("%s/util/buildscripts/build_notice.txt" % self.dojo_base_dir, "%s/util/buildscripts/_build_notice.txt" % self.dojo_base_dir)
# create an empty build-notice-file
f = open("%s/util/buildscripts/build_notice.txt" % self.dojo_base_dir, 'w')
f.close()
    def _dojo_mini_after_build(self):
        """Strip tests, demos and other non-essential files from the finished
        release directory, then restore the copyright/build-notice files that
        _dojo_mini_before_build() parked aside.

        Exits the whole process with status 1 if any cleanup step fails.
        """
        try:
            '''Copied from the build_mini.sh shell script (thank you Pete Higgins :-))'''
            if not os.path.exists(self.dojo_release_dir):
                raise CommandError('The dojo build failed! Check messages above!')
            else:
                # remove dojox tests and demos - they all follow this convetion
                self._remove_files('%s/dojox' % self.dojo_release_dir, ('^tests$', '^demos$'))
                # removed dijit tests
                dijit_tests = ("dijit/tests", "dijit/demos", "dijit/bench",
                               "dojo/tests", "util",
                               "dijit/themes/themeTesterImages")
                self._remove_folders(dijit_tests)
                # noir isn't worth including yet
                noir_theme_path = ("%s/dijit/themes/noir" % self.dojo_release_dir,)
                self._remove_folders(noir_theme_path)
                # so the themes are there, lets assume that, piggyback on noir: FIXME later
                self._remove_files('%s/dijit/themes' % self.dojo_release_dir, ('^.*\.html$',))
                self._remove_files(self.dojo_release_dir, ('^.*\.uncompressed\.js$',))
                # WARNING: templates have been inlined into the .js -- if you are using dynamic templates,
                # or other build trickery, these lines might not work!
                # NOTE(review): the next three calls pass paths relative to the CWD
                # (not self.dojo_release_dir) and a pattern that only matches a file
                # literally named ".html" -- presumably dead code; verify before relying on it.
                self._remove_files("dijit/templates", ("^\.html$",))
                self._remove_files("dijit/form/templates", ("^\.html$",))
                self._remove_files("dijit/layout/templates", ("^\.html$",))
                # .. assume you didn't, and clean up all the README's (leaving LICENSE, mind you)
                self._remove_files('%s/dojo/dojox' % self.dojo_release_dir, ('^README$',))
                dojo_folders = ("dojo/_base",)
                self._remove_folders(dojo_folders)
                os.remove("%s/dojo/_base.js" % self.dojo_release_dir)
                os.remove("%s/dojo/build.txt" % self.dojo_release_dir)
                os.remove("%s/dojo/tests.js" % self.dojo_release_dir)
        except Exception as e:
            # Any failure aborts the whole management command.
            print(e)
            sys.exit(1)
        # cleanup from above, refs #6616
        shutil.move("%s/util/buildscripts/_copyright.txt" % self.dojo_base_dir, "%s/util/buildscripts/copyright.txt" % self.dojo_base_dir)
        shutil.move("%s/util/buildscripts/_build_notice.txt" % self.dojo_base_dir, "%s/util/buildscripts/build_notice.txt" % self.dojo_base_dir)
def _remove_folders(self, folders):
for folder in folders:
if os.path.exists("%s/%s" % (self.dojo_release_dir, folder)):
shutil.rmtree("%s/%s" % (self.dojo_release_dir, folder))
def _remove_files(self, base_folder, regexp_list):
for root, dirs, files in os.walk(base_folder):
for file in files:
# remove all html-files
for regexp in regexp_list:
my_re = re.compile(regexp)
if my_re.match(file):
os.remove(os.path.join(root, file))
for dir in dirs:
for regexp in regexp_list:
my_re = re.compile(regexp)
if my_re.match(dir):
shutil.rmtree(os.path.join(root, dir))
SKIP_FILES = (
'(.*\.png)',
'(.*\.gif)',
'(.*\.jpg)',
'(.*\.svg)',
'(.*\.swf)',
'(.*\.fla)',
'(.*\.mov)',
'(.*\.smd)',
'(dojo/_firebug/firebug\..*)',
'(dojo/dojo\.(xd\.)?js)',
'(dojo/nls/.*)',
'(dojo/resources/dojo\.css)',
'(dojo/resources/blank\.html)',
'(dojo/resources/iframe_history\.html)',
'(dijit/themes/tundra/tundra\.css)',
'(dijit/themes/soria/soria\.css)',
'(dijit/themes/nihilo/nihilo\.css)',
'(dojox/dtl/contrib/.*)',
'(dojox/dtl/ext-dojo/.*)',
'(dojox/dtl/filter/.*)',
'(dojox/dtl/render/.*)',
'(dojox/dtl/tag/.*)',
'(dojox/dtl/utils/.*)',
'(dojox/io/proxy/xip_.*\.html)',
)
    def _dojo_mini_extreme(self):
        """
        This method removes all js files and just leaves all layer dojo files and static files (like "png", "gif", "svg", "swf", ...)
        """
        # prepare the regexp of files not to be removed!
        # mixin the profile specific skip files
        skip_files = self.SKIP_FILES + self.skip_files
        # One alternation over all keep-patterns; a path survives only if its
        # tail matches one of them.
        my_re = re.compile('^(.*/)?(%s)$' % "|".join(skip_files))
        # NOTE(review): matched against the absolute path with '/'-separators in
        # the patterns -- presumably assumes a POSIX filesystem; verify on Windows.
        try:
            '''Copied from the build_mini.sh shell script'''
            if not os.path.exists(self.dojo_release_dir):
                raise CommandError('The dojo build failed! Check messages above!')
            else:
                for root, dirs, files in os.walk(self.dojo_release_dir):
                    for file in files:
                        # remove all html-files
                        my_file = os.path.abspath(os.path.join(root, file))
                        if not my_re.match(my_file):
                            os.remove(my_file)
                # now remove all empty directories
                for root, dirs, files in os.walk(self.dojo_release_dir):
                    for dir in dirs:
                        try:
                            # just empty directories will be removed!
                            os.removedirs(os.path.join(root, dir))
                        except OSError:
                            pass
        except Exception as e:
            # Any failure aborts the whole management command.
            print(e)
            sys.exit(1)
DOJO_ZIP_SPECIAL = {'dojox': ['form', 'widget', 'grid']} # these modules will be zipped separately
def _dojo_prepare_zipserve(self):
"""
Creates zip packages for each dojo module within the current release folder.
It splits the module dojox into several modules, so it fits the 1000 files limit of
Google AppEngine.
"""
for folder in os.listdir(self.dojo_release_dir):
module_dir = '%s/%s' % (self.dojo_release_dir, folder)
if os.path.isdir(module_dir):
if folder in list(self.DOJO_ZIP_SPECIAL.keys()):
for special_module in self.DOJO_ZIP_SPECIAL[folder]:
special_module_dir = os.path.join(module_dir, special_module)
create_zip(special_module_dir,
'%(base_module)s/%(special_module)s' % {
'base_module': folder,
'special_module': special_module
},
'%(module_dir)s.%(special_module)s.zip' % {
'module_dir': module_dir,
'special_module': special_module
}
)
# remove the whole special module
shutil.rmtree(special_module_dir)
# now add the
create_zip(module_dir, folder, module_dir + ".zip")
shutil.rmtree(module_dir)
def zipfolder(path, relname, archive):
    """Recursively add every file under *path* to the open ZipFile *archive*,
    storing each entry under the archive-relative prefix *relname*."""
    for entry in os.listdir(path):
        src = os.path.join(path, entry)
        arcname = os.path.join(relname, entry)
        if os.path.isdir(src):
            zipfolder(src, arcname, archive)
        else:
            archive.write(src, arcname)
def create_zip(path, relname, archname):
    """Create a ZIP_DEFLATED archive at *archname* containing *path* -- a
    single file, or a whole directory tree (via zipfolder) -- stored under the
    archive-relative name *relname*."""
    import zipfile
    # Context manager guarantees the archive is closed (and its central
    # directory flushed) even when adding an entry raises; the original leaked
    # the open handle on error.
    with zipfile.ZipFile(archname, "w", zipfile.ZIP_DEFLATED) as archive:
        if os.path.isdir(path):
            zipfolder(path, relname, archive)
        else:
            archive.write(path, relname)
| 50.564417 | 199 | 0.58487 |
c6de4093e30b0e36784b53aa90cc5ef1bb027051
| 1,131 |
py
|
Python
|
fluid/text_matching_on_quora/configs/basic_config.py
|
phlrain/models
|
59adc0d6f38cd2351e16608d6c9d4e72dd5e7fea
|
[
"Apache-2.0"
] | 1 |
2018-11-23T10:29:49.000Z
|
2018-11-23T10:29:49.000Z
|
fluid/PaddleNLP/text_matching_on_quora/configs/basic_config.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | null | null | null |
fluid/PaddleNLP/text_matching_on_quora/configs/basic_config.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | 2 |
2018-06-14T13:59:36.000Z
|
2018-11-14T12:34:47.000Z
|
from __future__ import print_function
class config(object):
    """Basic hyper-parameter configuration for the text-matching model; every
    option is a plain instance attribute assigned in __init__."""

    def __init__(self):
        self.batch_size = 128
        self.epoch_num = 50
        self.optimizer_type = 'adam' # sgd, adagrad
        # pretrained word embedding
        self.use_pretrained_word_embedding = True
        # when employing pretrained word embedding,
        # out of vocabulary words' embedding is initialized with uniform or normal numbers
        self.OOV_fill = 'uniform'
        self.embedding_norm = False
        # or else, use padding and masks for sequence data
        self.use_lod_tensor = True
        # lr = lr * lr_decay after each epoch
        self.lr_decay = 1
        self.learning_rate = 0.001
        self.save_dirname = 'model_dir'
        self.train_samples_num = 384348
        self.duplicate_data = False
        self.metric_type = ['accuracy']

    def list_config(self):
        """Dump every configured option to stdout."""
        print("config", self.__dict__)

    def has_member(self, var_name):
        """Return True if an option called *var_name* has been set on this
        instance (instance attributes only, not class members)."""
        return var_name in vars(self)
if __name__ == "__main__":
    # Smoke test: print the default configuration, then confirm that ad-hoc
    # attributes show up in subsequent dumps.
    basic = config()
    basic.list_config()
    basic.ahh = 2
    basic.list_config()
| 26.302326 | 90 | 0.635721 |
eb146632cb3be5761b0d50bd7f5829b60284d624
| 77,932 |
py
|
Python
|
helpers/Gui.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
helpers/Gui.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
helpers/Gui.py
|
calebtrahan/KujiIn_Python
|
0599d36993fa1d5988a4cf3206a12fdbe63781d8
|
[
"MIT"
] | null | null | null |
from utils import Tools
def _fromUtf8(s):
    """Return *s* unchanged (identity placeholder for the Qt ``fromUtf8`` helper)."""
    return s
import math
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from PyQt4 import QtCore, QtGui
from PyQt4.phonon import Phonon
from main_const import *
from helpers import Help
from helpers import Database, Reference
from utils import Tools
class KujiDesign(QMainWindow):
def __init__(self, mainprogram):
QMainWindow.__init__(self)
qss_file = open(STYLESHEET).read()
self.setStyleSheet(qss_file)
self.sessiondb = Database.SessionDatabase(self)
self.main = mainprogram
self.player = None
self.resize(1170, 830)
self.sessioncreated = False
app_icon = QtGui.QIcon()
app_icon.addFile(os.path.join(WORKINGDIRECTORY, "assets", "icons", "mainwinicon"), QtCore.QSize(16, 16))
self.setWindowIcon(app_icon)
self.initgui()
def viewgoals(self):
if self.sessiondb.goalsset:
self.sessiondb.displaycurrentgoals()
else:
QMessageBox.information(None, "No Goals Set",
"No Goals Set. Please Set A Goal",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
def viewcompletedgoals(self):
if self.sessiondb.completedgoalsset:
self.sessiondb.displaycompletedgoals()
else:
QMessageBox.information(None, "No Goals Completed Yet",
"Keep Up The Hard Work And You Will Achieve A Goal Soon...",
QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton)
    def howtousethisprogram(self):
        """Open the built-in help/tutorial window."""
        Help.KujiInHelp(self)
    def goalcompleted(self, goaltext):
        """Show a blocking congratulation dialog for the achieved goal.

        Parameters:
            goaltext: display string describing the goal that was reached.
        """
        goalcompleteddialog = QDialog(self)
        goalcompleteddialog.resize(406, 235)
        goalcompleteddialog.setLayoutDirection(Qt.LeftToRight)
        goalcompleteddialog.setAutoFillBackground(False)
        goalcompleteddialog.setModal(False)
        self.goalcompletedtopLabel = QLabel(goalcompleteddialog)
        self.goalcompletedtopLabel.setGeometry(QRect(120, 10, 161, 20))
        self.goalcompletedtopLabel.setStyleSheet("font: 12pt \"Arial\";\n"
                                                 "color: rgb(152, 166, 168);")
        self.goalcompletedmiddleLabel = QLabel(goalcompleteddialog)
        self.goalcompletedmiddleLabel.setGeometry(QRect(30, 40, 341, 21))
        self.goalcompletedValue = QLabel(goalcompleteddialog)
        self.goalcompletedValue.setGeometry(QRect(90, 70, 211, 51))
        self.goalcompletedValue.setStyleSheet("border: 1px solid black;\n"
                                              "font: 75 18pt \"Arial\";\n"
                                              "color: rgb(152, 166, 168);")
        self.goalcompletedValue.setAlignment(QtCore.Qt.AlignCenter)
        self.goalcompletedbottomLabel = QtGui.QLabel(goalcompleteddialog)
        self.goalcompletedbottomLabel.setGeometry(QtCore.QRect(10, 130, 391, 51))
        self.goalcompletedbottomLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.goalcompletedbottomLabel.setWordWrap(True)
        self.goalcompletedButton = QtGui.QPushButton(goalcompleteddialog)
        self.goalcompletedButton.setGeometry(QtCore.QRect(310, 200, 84, 30))
        goalcompleteddialog.setWindowTitle("Goal Completed")
        self.goalcompletedtopLabel.setText("GOAL COMPLETED!")
        self.goalcompletedmiddleLabel.setText("It Wasn\'t Easy, But You Stuck With It And Achieved Your Goal Of:")
        self.goalcompletedValue.setText(goaltext)
        self.goalcompletedbottomLabel.setText(
            "Celebrate In Your Success, And Achieve The Next Goal You Have Set. Anything Is Possible!")
        self.goalcompletedButton.setText("OK")
        # "OK" simply dismisses the dialog.
        QtCore.QObject.connect(self.goalcompletedButton, QtCore.SIGNAL("clicked()"), goalcompleteddialog.accept)
        goalcompleteddialog.exec_()
        # Test Here If A New Goal Is Set, If Not Ask User To Set One!
def setgoalstatus(self):
"""Method To Set Goal Current, Percentage And Total On Startup And After A Session Is Finished"""
currenttext, percent, totaltext = self.sessiondb.getgoalstatus()
if None not in [currenttext, percent, totaltext]:
self.currentgoalLabel.setText(currenttext)
self.goalProgressBar.setValue(percent)
self.goalLabel.setText(totaltext)
    def goalpacing(self):
        """Ask how many days per week the user practices and have the session
        database compute the pace needed to reach the current goal; requires
        at least one goal to be set."""
        if self.sessiondb.goalsset:
            self.goalpacingDialog = QDialog(self)
            self.goalpacingDialog.resize(400, 258)
            self.goalpacingtopLabel = QtGui.QLabel(self.goalpacingDialog)
            self.goalpacingtopLabel.setGeometry(QtCore.QRect(10, 20, 371, 81))
            self.goalpacingtopLabel.setAlignment(QtCore.Qt.AlignCenter)
            self.goalpacingtopLabel.setWordWrap(True)
            self.goalpacingtopLabel.setObjectName(_fromUtf8("goalpacingtopLabel"))
            self.horizontalLayoutWidget = QtGui.QWidget(self.goalpacingDialog)
            self.horizontalLayoutWidget.setGeometry(QtCore.QRect(110, 110, 171, 41))
            self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
            self.goalpacingValuesLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
            self.goalpacingValuesLayout.setMargin(0)
            self.goalpacingValuesLayout.setObjectName(_fromUtf8("goalpacingValuesLayout"))
            # NOTE: "goalpaincg" misspelling kept -- it is the attribute name
            # other code may reference.
            self.goalpaincgValue = QtGui.QSpinBox(self.horizontalLayoutWidget)
            self.goalpaincgValue.setMaximum(7)
            self.goalpaincgValue.setObjectName(_fromUtf8("goalpaincgValue"))
            self.goalpacingValuesLayout.addWidget(self.goalpaincgValue)
            self.goalpacingdaysLabel = QtGui.QLabel(self.horizontalLayoutWidget)
            self.goalpacingdaysLabel.setObjectName(_fromUtf8("goalpacingdaysLabel"))
            self.goalpacingValuesLayout.addWidget(self.goalpacingdaysLabel)
            self.horizontalLayoutWidget_2 = QtGui.QWidget(self.goalpacingDialog)
            self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(200, 190, 188, 41))
            self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
            self.goalpacingButtonLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
            self.goalpacingButtonLayout.setMargin(0)
            self.goalpacingButtonLayout.setObjectName(_fromUtf8("goalpacingButtonLayout"))
            self.goalpacingcalculateButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
            self.goalpacingcalculateButton.setObjectName(_fromUtf8("goalpacingcalculateButton"))
            self.goalpacingButtonLayout.addWidget(self.goalpacingcalculateButton)
            self.goalpacingcancelButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
            self.goalpacingcancelButton.setObjectName(_fromUtf8("goalpacingcancelButton"))
            self.goalpacingButtonLayout.addWidget(self.goalpacingcancelButton)
            self.goalpacingDialog.setWindowTitle("Goal Pacing")
            self.goalpacingtopLabel.setText(
                "In Order To Calculate How Much You Need To Meditate To Reach Your Goals, I Need To Know How Many Days A Week You Will Be Meditating")
            self.goalpacingdaysLabel.setText("Days A Week")
            self.goalpacingcalculateButton.setText("CALCULATE")
            self.goalpacingcancelButton.setText("CANCEl")
            # CALCULATE accepts the dialog, CANCEl rejects it.
            QtCore.QObject.connect(self.goalpacingcalculateButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                                   self.goalpacingDialog.accept)
            QtCore.QObject.connect(self.goalpacingcancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                                   self.goalpacingDialog.reject)
            ret = self.goalpacingDialog.exec_()
            if ret == QDialog.Accepted:
                if self.goalpaincgValue.value() != 0:
                    # The computed pacing text is shown in the status bar for 10s.
                    goalpacingtext = self.sessiondb.goalpacing(int(self.goalpaincgValue.value()))
                    self.statusBar.showMessage(goalpacingtext, 10000)
                else:
                    self.statusBar.showMessage(
                        "You Must Practice At Least Once A Week For Me To Calculate Your Goal Pacing", 3000)
        else:
            QtGui.QMessageBox.information(None, "No Goals Set",
                                          "You Must Set At Least One Goal In Order For Me To Calculate Goal Pacing For That Goal",
                                          QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
                                          QtGui.QMessageBox.NoButton)
    def closeEvent(self, event):
        """Stuff To Do Right Before Gui Is Closed"""
        # If a session is playing, pause it and require a written reason before
        # the window may close; otherwise just confirm the exit.
        self.player = self.main.sessionplayer
        playing = (self.player.entrainmentObject.state() == Phonon.PlayingState)
        if playing:
            quit_msg = "End The Session Prematurely?"
            reply = QtGui.QMessageBox.question(self, 'Confirmation End Session Prematurely',
                                               quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                self.player.entrainmentObject.pause()
                if self.AmbienceOption.isChecked():
                    self.player.ambienceObject.pause()
                self.statusBar.showMessage("Session Is Currently Paused")
                # Build the "why are you ending early" dialog on the fly.
                self.confirmationDialog2 = QtGui.QDialog(self)
                self.confirmationDialog2.resize(434, 319)
                self.prematureendingreason = QtGui.QTextEdit(self.confirmationDialog2)
                self.prematureendingreason.setGeometry(QtCore.QRect(10, 50, 411, 221))
                self.prematureendingreason.setObjectName(_fromUtf8("listView"))
                self.label2 = QtGui.QLabel(self.confirmationDialog2)
                self.label2.setGeometry(QtCore.QRect(10, 20, 411, 20))
                self.label2.setAlignment(QtCore.Qt.AlignCenter)
                self.horizontalLayoutWidget2 = QtGui.QWidget(self.confirmationDialog2)
                self.horizontalLayoutWidget2.setGeometry(QtCore.QRect(80, 280, 341, 32))
                self.horizontalLayoutWidget2.setObjectName(_fromUtf8("horizontalLayoutWidget"))
                self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget2)
                self.horizontalLayout.setMargin(0)
                self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
                self.CreateButton = QtGui.QPushButton(self.horizontalLayoutWidget2)
                self.CreateButton.setObjectName(_fromUtf8("pushButton_2"))
                self.horizontalLayout.addWidget(self.CreateButton)
                self.listofsessionsButton = QtGui.QPushButton(self.horizontalLayoutWidget2)
                self.listofsessionsButton.setObjectName(_fromUtf8("pushButton"))
                self.horizontalLayout.addWidget(self.listofsessionsButton)
                self.confirmationDialog2.setWindowTitle("Premature Ending")
                self.label2.setText("Enter The Reason You Are Ending The Session Early:")
                self.CreateButton.setText("End Session")
                self.listofsessionsButton.setText("Resume Session")
                # "End Session" validates/stores the reason (testprematureending
                # accepts the dialog on success); "Resume Session" rejects it.
                QtCore.QObject.connect(self.CreateButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.testprematureending)
                QtCore.QObject.connect(self.listofsessionsButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                                       self.confirmationDialog2.reject)
                result = self.confirmationDialog2.exec_()
                if result == QDialog.Rejected:
                    # User chose to resume: restart playback and keep the window open.
                    self.statusBar.showMessage("Resuming Session...")
                    self.player.entrainmentObject.play()
                    if self.AmbienceOption.isChecked():
                        self.player.ambienceObject.play()
                    event.ignore()
            else:
                # User answered "No" to ending the session: abort the close.
                event.ignore()
        else:
            quit_msg = "Really Exit?"
            reply = QtGui.QMessageBox.question(self, 'Message',
                                               quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
            if reply == QtGui.QMessageBox.Yes:
                event.accept()
            else:
                event.ignore()
def testprematureending(self):
if self.prematureendingreason.toPlainText():
sessionexpectedlist = list()
for x, i in enumerate(self.slidervalues):
if i.value() != 0:
val = i.value()
cutname = self.main.cuts[x]["name"]
txt = "%s: %s " % (cutname, val)
sessionexpectedlist.append(txt)
self.statusBar.showMessage("Writing Reason To Database...")
reason = self.prematureendingreason.toPlainText()
self.sessiondb.writeprematureending(self.player.cutplayingname, sessionexpectedlist, reason)
self.confirmationDialog2.accept()
else:
QtGui.QMessageBox.information(None, "Reason Empty",
"Reason Cannot Be Blank In Order To End The Session Early",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
    def aboutthisprogram(self):
        """Dialog Displaying Info About This Program"""
        self.aboutdialog = QDialog(self)
        self.aboutdialog.setObjectName(_fromUtf8("aboutdialog"))
        self.aboutdialog.resize(433, 268)
        self.aboutcloseButton = QtGui.QPushButton(self.aboutdialog)
        self.aboutcloseButton.setGeometry(QtCore.QRect(324, 230, 90, 23))
        self.aboutcloseButton.setObjectName(_fromUtf8("aboutcloseButton"))
        self.aboutlabel = QtGui.QLabel(self.aboutdialog)
        self.aboutlabel.setGeometry(QtCore.QRect(20, 20, 391, 21))
        self.aboutlabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";"))
        self.aboutlabel.setAlignment(QtCore.Qt.AlignCenter)
        self.aboutlabel.setObjectName(_fromUtf8("aboutlabel"))
        self.abouttext = QtGui.QTextBrowser(self.aboutdialog)
        self.abouttext.setGeometry(QtCore.QRect(20, 50, 391, 170))
        self.abouttext.setObjectName(_fromUtf8("abouttext"))
        self.aboutHowToUseButton = QtGui.QPushButton(self.aboutdialog)
        self.aboutHowToUseButton.setGeometry(QtCore.QRect(20, 230, 111, 23))
        self.aboutHowToUseButton.setObjectName(_fromUtf8("aboutHowToUseButton"))
        self.aboutdialog.setWindowTitle("About")
        self.aboutcloseButton.setText("Close")
        self.aboutlabel.setText("About The Kuji-In Program")
        # Static Qt rich-text (HTML) body describing the program.
        self.abouttext.setHtml(
            "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
            "<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
            "p, li { white-space: pre-wrap; }\n"
            "</style></head><body style=\" font-family:\'Arial\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">This program was developed to be an aid to the practitioners of the Kuji-In through the use of brainwave entrainment technology and optional user-selected soundhelpers files. </span></p>\n"
            "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p>\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">Program Development By Caleb Trahan (c) 2015 All Rights Reserved</span></p>\n"
            "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p>\n"
            "<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">Program Written In </span><span style=\" font-family:\'Sans Serif\'; font-size:10pt; text-decoration: underline; color:#f0651f;\">Python 3</span><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\">, Designed With </span><span style=\" font-family:\'Sans Serif\'; font-size:10pt; text-decoration: underline; color:#f0651f;\">Qt 4</span><span style=\" font-family:\'Sans Serif\'; font-size:10pt;\"></span> And pydub Used For Audio Manipulation</p>\n"
            "<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px; font-family:\'Sans Serif\'; font-size:10pt;\"><br /></p></body></html>")
        self.aboutHowToUseButton.setText("Tutorials")
        # "Tutorials" accepts the dialog (opening the help below); "Close" rejects it.
        QtCore.QObject.connect(self.aboutHowToUseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.aboutdialog.accept)
        QtCore.QObject.connect(self.aboutcloseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.aboutdialog.reject)
        ret = self.aboutdialog.exec_()
        if ret == QDialog.Accepted:
            Help.KujiInHelp(self)
def displaylistofsession(self):
"""Method To Display The List Of Sessions Practiced"""
sessionlist = self.sessiondb.testifnosessions()
if not sessionlist:
QtGui.QMessageBox.information(None, "No Sessions",
"No Sessions Practiced Yet",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
else:
Database.DisplaySessionList(self, self.sessiondb)
def displayprematureendings(self):
"""Method To Display List Of Sessions Ended Early, If Any"""
a = Database.DisplayPrematureEndings(self, self.sessiondb)
isprematureendings = a.testforprematureendings()
if not isprematureendings:
QtGui.QMessageBox.information(None, "No Premature Endings",
"No Premature Endings, Excellent Work And Let's Keep It That Way!",
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
def calculatetotalsessiontime(self):
self.totalMinutesDisplay.display(0)
self.totalhoursDisplay.display(0)
totaltime = int()
for x, i in enumerate(self.slidervalues):
if i.value() != 0:
totaltime += i.value()
if totaltime > 0:
if totaltime >= 60:
hours = math.floor(totaltime / 60)
if hours != 0:
self.totalhoursDisplay.display(hours)
totaltime -= hours * 60
if totaltime != 0:
self.totalMinutesDisplay.display(totaltime)
else:
self.totalMinutesDisplay.display(totaltime)
else:
self.statusBar.showMessage("Nothing To Calculate. All Cuts Are Set To 0", 1000)
def displaytotalprogress(self):
rinlist = [self.rintotalhoursDisplay, self.rintotalminutesDisplay]
kyolist = [self.kyototalhoursDisplay, self.kyototalminutesDisplay]
tohlist = [self.tohtotalhoursDisplay, self.tohtotalminutesDisplay]
shalist = [self.shatotalhoursDisplay, self.shatotalminutesDisplay]
kailist = [self.kaitotalhoursDisplay, self.kaitotalminutesDisplay]
jinlist = [self.jintotalhoursDisplay, self.jintotalminutesDisplay]
retsulist = [self.retsutotalhoursDisplay, self.retsutotalminutesDisplay]
zailist = [self.zaitotalhoursDisplay, self.zaitotalminutesDisplay]
zenlist = [self.zentotalhoursDisplay, self.zentotalminutesDisplay]
totallist = [rinlist, kyolist, tohlist, shalist, kailist, jinlist, retsulist, zailist, zenlist]
for x, i in enumerate(totallist):
self.sessiondb.calculatetotalminutesforindividualcut(x, i)
return True
    def newgoaldialog(self):
        """Build and run the "set a new goal" dialog; accepting is handled by
        checkgoals(), which validates and stores the entered goal."""
        self.setgoalsdialog = QDialog(self)
        self.setgoalsdialog.setObjectName(_fromUtf8("setgoalsdialog"))
        self.setgoalsdialog.resize(434, 241)
        self.setgoaldialogtopLabel = QtGui.QLabel(self.setgoalsdialog)
        self.setgoaldialogtopLabel.setGeometry(QtCore.QRect(40, 30, 381, 16))
        self.setgoaldialogtopLabel.setObjectName(_fromUtf8("setgoaldialogtopLabel"))
        self.setgoaldialoggoalLabel = QtGui.QLabel(self.setgoalsdialog)
        self.setgoaldialoggoalLabel.setGeometry(QtCore.QRect(130, 70, 59, 15))
        self.setgoaldialoggoalLabel.setObjectName(_fromUtf8("setgoaldialoggoalLabel"))
        self.horizontalLayoutWidget = QtGui.QWidget(self.setgoalsdialog)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(100, 90, 91, 41))
        self.horizontalLayoutWidget.setObjectName(_fromUtf8("horizontalLayoutWidget"))
        self.setgoalsdialoggoallayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget)
        self.setgoalsdialoggoallayout.setMargin(0)
        self.setgoalsdialoggoallayout.setObjectName(_fromUtf8("setgoalsdialoggoallayout"))
        self.setgoaldialogvalue = QtGui.QSpinBox(self.horizontalLayoutWidget)
        self.setgoaldialogvalue.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.setgoaldialogvalue.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
        self.setgoaldialogvalue.setObjectName(_fromUtf8("setgoaldialogvalue"))
        self.setgoalsdialoggoallayout.addWidget(self.setgoaldialogvalue)
        self.setgoaldialoghrslabel = QtGui.QLabel(self.horizontalLayoutWidget)
        self.setgoaldialoghrslabel.setObjectName(_fromUtf8("setgoaldialoghrslabel"))
        self.setgoalsdialoggoallayout.addWidget(self.setgoaldialoghrslabel)
        # Due-date picker defaults to "now" with a popup calendar.
        self.setgoaldialogDueDate = QtGui.QDateEdit(self.setgoalsdialog)
        self.setgoaldialogDueDate.setDateTime(QDateTime.currentDateTime())
        self.setgoaldialogDueDate.setCalendarPopup(True)
        self.setgoaldialogDueDate.setGeometry(QtCore.QRect(220, 100, 110, 22))
        self.setgoaldialogDueDate.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
        self.setgoaldialogDueDate.setButtonSymbols(QtGui.QAbstractSpinBox.PlusMinus)
        self.setgoaldialogDueDate.setDisplayFormat(_fromUtf8(""))
        self.setgoaldialogDueDate.setObjectName(_fromUtf8("setgoaldialogDueDate"))
        self.setgoalduedateLabel = QtGui.QLabel(self.setgoalsdialog)
        self.setgoalduedateLabel.setGeometry(QtCore.QRect(240, 70, 61, 20))
        self.setgoalduedateLabel.setObjectName(_fromUtf8("setgoalduedateLabel"))
        self.horizontalLayoutWidget_2 = QtGui.QWidget(self.setgoalsdialog)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(90, 180, 334, 41))
        self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
        self.setdialogbuttonslayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.setdialogbuttonslayout.setMargin(0)
        self.setdialogbuttonslayout.setObjectName(_fromUtf8("setdialogbuttonslayout"))
        self.setgoaldialoggoallistButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
        self.setgoaldialoggoallistButton.setObjectName(_fromUtf8("pushButton"))
        self.setdialogbuttonslayout.addWidget(self.setgoaldialoggoallistButton)
        self.setgoaldialogAcceptButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
        self.setgoaldialogAcceptButton.setObjectName(_fromUtf8("setgoaldialogAcceptButton"))
        self.setdialogbuttonslayout.addWidget(self.setgoaldialogAcceptButton)
        self.setgoaldialogCancelButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
        self.setgoaldialogCancelButton.setObjectName(_fromUtf8("setgoaldialogCancelButton"))
        self.setdialogbuttonslayout.addWidget(self.setgoaldialogCancelButton)
        self.setgoalsdialog.setWindowTitle("Set A New Goal")
        currenthours = self.sessiondb.calculatetotalhours()
        self.setgoaldialogtopLabel.setText("You Have Practiced For %s Hours. Please Set A New Goal:" % currenthours)
        self.setgoaldialoggoalLabel.setText("GOAL")
        self.setgoaldialoghrslabel.setText("hrs")
        self.setgoalduedateLabel.setText("Due Date")
        self.setgoaldialogAcceptButton.setText("Set This Goal")
        self.setgoaldialoggoallistButton.setText("Current Goals")
        self.setgoaldialogCancelButton.setText("Cancel")
        # Accept runs checkgoals() (which accepts the dialog on success); the
        # goal-list button shows current goals without closing; Cancel rejects.
        QtCore.QObject.connect(self.setgoaldialogAcceptButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.checkgoals)
        QtCore.QObject.connect(self.setgoaldialoggoallistButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.viewgoals)
        QtCore.QObject.connect(self.setgoaldialogCancelButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                               self.setgoalsdialog.reject)
        self.setgoalsdialog.exec_()
def checkgoals(self):
goaldate = self.setgoaldialogDueDate.dateTime().toPyDateTime()
goalhours = int(self.setgoaldialogvalue.value())
valueisgood = self.sessiondb.checknewgoals(goaldate, goalhours)
if isinstance(valueisgood, bool):
self.statusBar.showMessage("Adding Goal...")
self.sessiondb.insertgoal(goaldate, goalhours)
self.setgoalsdialog.accept()
self.setgoalstatus()
self.statusBar.showMessage("Goal Successfully Added", 3000)
else:
QtGui.QMessageBox.critical(None, "Error Adding Goal",
valueisgood,
QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,
QtGui.QMessageBox.NoButton)
return
    def setsignalsandslots(self):
        """Signals And Slot Bindings For Main GUI"""
        # Each cut slider drives its LCD display directly.
        QtCore.QObject.connect(self.preSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.preDisplay_2.display)
        QtCore.QObject.connect(self.rinSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.rinDisplay_2.display)
        QtCore.QObject.connect(self.kyoSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.kyoDisplay_2.display)
        QtCore.QObject.connect(self.tohSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.tohDisplay_2.display)
        QtCore.QObject.connect(self.shaSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.shaDisplay_2.display)
        QtCore.QObject.connect(self.kaiSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.kaiDisplay_2.display)
        QtCore.QObject.connect(self.jinSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.jinDisplay_2.display)
        QtCore.QObject.connect(self.retsuSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.retsuDisplay_2.display)
        QtCore.QObject.connect(self.zaiSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.zaiDisplay_2.display)
        QtCore.QObject.connect(self.zenSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.zenDisplay_2.display)
        QtCore.QObject.connect(self.postSlider_2, QtCore.SIGNAL(_fromUtf8("valueChanged(int)")),
                               self.postDisplay_2.display)
        # Buttons and checkboxes dispatch to the main program or local handlers.
        QtCore.QObject.connect(self.CreateButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.createsession)
        QtCore.QObject.connect(self.listofsessionsButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                               self.displaylistofsession)
        QtCore.QObject.connect(self.AmbienceOption, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")),
                               self.main.ambiencecheckboxchecked)
        QtCore.QObject.connect(self.ReferenceDisplayOption, QtCore.SIGNAL(_fromUtf8("stateChanged(int)")),
                               self.main.referenceboxchecked)
        QtCore.QObject.connect(self.prematureendingsbutton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                               self.displayprematureendings)
        QtCore.QObject.connect(self.PlayButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.playsession)
        QtCore.QObject.connect(self.PauseButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.pausesession)
        QtCore.QObject.connect(self.StopButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.stopsession)
        QtCore.QObject.connect(self.exportButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.main.exportsession)
        QtCore.QObject.connect(self.calculateTotalSessionTimeButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                               self.calculatetotalsessiontime)
        QtCore.QObject.connect(self.viewgoalsButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.viewgoals)
        QtCore.QObject.connect(self.completedgoalsButton, QtCore.SIGNAL(_fromUtf8("clicked()")),
                               self.viewcompletedgoals)
        QtCore.QObject.connect(self.setgoalButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.newgoaldialog)
        QtCore.QObject.connect(self.goalpacingButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.goalpacing)
        QtCore.QObject.connect(self.changeallvaluesButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.changeallvalues)
        # self.setnewmaxslidervalue()
        # Seed the per-cut progress displays once the wiring is in place.
        self.displaytotalprogress()
def changeallvalues(self):
"""Method With A Dialog To Set All Values To _____"""
self.changevaluesdialog = QtGui.QDialog(self)
self.changevaluesdialog.setObjectName(_fromUtf8("changevaluedialog"))
self.changevaluesdialog.resize(395, 130)
self.changevaluesdialog.setMinimumSize(QtCore.QSize(0, 100))
self.changeValueTopLabel = QtGui.QLabel(self.changevaluesdialog)
self.changeValueTopLabel.setGeometry(QtCore.QRect(70, 30, 131, 20))
self.changeValueTopLabel.setObjectName(_fromUtf8("changeValueTopLabel"))
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.changevaluesdialog)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(20, 60, 337, 51))
self.horizontalLayoutWidget_2.setObjectName(_fromUtf8("horizontalLayoutWidget_2"))
self.changeValueLayout2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.changeValueLayout2.setMargin(0)
self.changeValueLayout2.setObjectName(_fromUtf8("changeValueLayout2"))
self.changeValuecheckbox = QtGui.QCheckBox(self.horizontalLayoutWidget_2)
self.changeValuecheckbox.setObjectName(_fromUtf8("changeValuecheckbox"))
self.changeValueLayout2.addWidget(self.changeValuecheckbox)
self.pushButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.changeValueLayout2.addWidget(self.pushButton)
self.StopButton = QtGui.QPushButton(self.horizontalLayoutWidget_2)
self.StopButton.setObjectName(_fromUtf8("pushButton_2"))
self.changeValueLayout2.addWidget(self.StopButton)
self.changeValuespinbox = QtGui.QSpinBox(self.changevaluesdialog)
self.changeValuespinbox.setGeometry(QtCore.QRect(210, 30, 51, 20))
# self.changeValuespinbox.setLayoutDirection(QtCore.Qt.RightToLeft)
self.changeValuespinbox.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)
self.changeValuespinbox.setObjectName(_fromUtf8("changeValuespinbox"))
self.changeValueminuteslabel = QtGui.QLabel(self.changevaluesdialog)
self.changeValueminuteslabel.setGeometry(QtCore.QRect(270, 20, 91, 39))
self.changevaluesdialog.setWindowTitle("Change All Values")
self.changeValueTopLabel.setText("Change All Values To:")
self.changeValuecheckbox.setText("Include Pre And Post")
self.changeValueminuteslabel.setText("minute(s)")
self.pushButton.setText("OK")
self.StopButton.setText("Cancel")
QtCore.QObject.connect(self.pushButton, QtCore.SIGNAL("clicked()"), self.changevaluesdialog.accept)
QtCore.QObject.connect(self.StopButton, QtCore.SIGNAL("clicked()"), self.changevaluesdialog.reject)
ret = self.changevaluesdialog.exec_()
if ret == QDialog.Accepted:
value = self.changeValuespinbox.value()
if self.changeValuecheckbox.isChecked():
for x, i in enumerate(self.slidervalues):
i.setValue(value)
else:
for x, i in enumerate(self.slidervalues):
if x not in [0, 10]:
i.setValue(value)
def initgui(self):
"""Method To Setup Gui Bindings"""
self.centralwidget = self
self.centralwidget.setObjectName(_fromUtf8("Kuji-In Session Creator"))
self.frame = QtGui.QFrame(self.centralwidget)
self.frame.setGeometry(QtCore.QRect(0, 0, 1200, 821))
# self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
# self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
######## Creating Session Top Cutname Labels
self.horizontalLayoutWidget_3 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget_3.setGeometry(QtCore.QRect(36, 80, 716, 51))
self.DurationLabels_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_3)
self.DurationLabels_2.setMargin(0)
self.preLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.preLabel_2)
self.rinLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.rinLabel_2)
self.kyoLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.kyoLabel_2)
self.tohLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.tohLabel_2)
self.shaLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.shaLabel_2)
self.kaiLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.kaiLabel_2)
self.jinLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.jinLabel_2)
self.retsuLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.retsuLabel_2)
self.zaiLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.zaiLabel_2)
self.zenLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.zenLabel_2)
self.postLabel_2 = QtGui.QLabel(self.horizontalLayoutWidget_3)
self.DurationLabels_2.addWidget(self.postLabel_2)
creatingsessiontopcutnamelabels = [
self.preLabel_2, self.rinLabel_2, self.kyoLabel_2, self.tohLabel_2, self.shaLabel_2, self.kaiLabel_2,
self.jinLabel_2, self.retsuLabel_2, self.zaiLabel_2, self.zenLabel_2, self.postLabel_2
]
for i in creatingsessiontopcutnamelabels:
i.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"border-top: 1px solid black;\n"
"border-left: 1px solid black;\n"
"border-right: 1px solid black;"))
i.setAlignment(QtCore.Qt.AlignCenter)
### Creating Session Slider
self.sessionslidersLayout = QtGui.QWidget(self.frame)
self.sessionslidersLayout.setGeometry(QtCore.QRect(10, 130, 761, 331))
self.DurationSliders_2 = QtGui.QHBoxLayout(self.sessionslidersLayout)
self.DurationSliders_2.setContentsMargins(-1, -1, 0, -1)
self.preSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.preSlider_2)
self.rinSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.rinSlider_2)
self.kyoSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.kyoSlider_2)
self.tohSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.tohSlider_2)
self.shaSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.shaSlider_2)
self.kaiSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.kaiSlider_2)
self.jinSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.jinSlider_2)
self.retsuSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.retsuSlider_2)
self.zaiSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.zaiSlider_2)
self.zenSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.zenSlider_2)
self.postSlider_2 = QtGui.QSlider(self.sessionslidersLayout)
self.DurationSliders_2.addWidget(self.postSlider_2)
creatingsessionsliders = [
self.preSlider_2, self.rinSlider_2, self.kyoSlider_2, self.tohSlider_2, self.shaSlider_2, self.kaiSlider_2,
self.jinSlider_2, self.retsuSlider_2, self.zaiSlider_2, self.zenSlider_2, self.postSlider_2
]
for i in creatingsessionsliders:
i.setMaximum(90)
i.setSingleStep(5)
i.setPageStep(5)
i.setOrientation(QtCore.Qt.Vertical)
i.setTickPosition(QtGui.QSlider.TicksBothSides)
self.toptotalsLabel = QtGui.QLabel(self.frame)
self.toptotalsLabel.setGeometry(QtCore.QRect(280, 40, 221, 21))
self.toptotalsLabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";\n"
"color: #98A6A8;"))
self.toptotalsLabel.setAlignment(QtCore.Qt.AlignCenter)
self.toptotalsLabel.setObjectName(_fromUtf8("toptotalsLabel"))
######## Values Below Sliders On Session Creator
self.horizontalLayoutWidget_2 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(20, 470, 721, 41))
self.CutDurationDisplays_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.setMargin(0)
self.preDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.preDisplay_2)
self.rinDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.rinDisplay_2)
self.kyoDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.kyoDisplay_2)
self.tohDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.tohDisplay_2)
self.shaDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.shaDisplay_2)
self.kaiDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.kaiDisplay_2)
self.jinDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.jinDisplay_2)
self.retsuDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.retsuDisplay_2)
self.zaiDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.zaiDisplay_2)
self.zenDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
self.CutDurationDisplays_2.addWidget(self.zenDisplay_2)
self.postDisplay_2 = QtGui.QLCDNumber(self.horizontalLayoutWidget_2)
lcdvaluesbelowsliders = [
self.preDisplay_2, self.rinDisplay_2, self.kyoDisplay_2, self.tohDisplay_2, self.shaDisplay_2,
self.kaiDisplay_2, self.jinDisplay_2, self.retsuDisplay_2, self.zaiDisplay_2, self.zenDisplay_2,
self.postDisplay_2
]
for i in lcdvaluesbelowsliders:
i.setNumDigits(3)
self.CutDurationDisplays_2.addWidget(self.postDisplay_2)
######## CutName Labels On Total Progress Display
self.verticalLayoutWidget = QtGui.QWidget(self.frame)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(790, 80, 91, 432))
self.totalprogressLabels = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.totalprogressLabels.setMargin(0)
self.rintotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.rintotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.rintotalLabel)
self.kyototalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.kyototalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.kyototalLabel)
self.tohtotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.tohtotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.tohtotalLabel)
self.shatotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.shatotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.shatotalLabel)
self.kaitotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.kaitotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.kaitotalLabel)
self.jintotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.jintotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.jintotalLabel)
self.retsutotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.retsutotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.retsutotalLabel)
self.zaitotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.zaitotalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.totalprogressLabels.addWidget(self.zaitotalLabel)
self.zentotalLabel = QtGui.QLabel(self.verticalLayoutWidget)
self.zentotalLabel.setAlignment(QtCore.Qt.AlignCenter)
cutnamelabels = [
self.rintotalLabel, self.kyototalLabel, self.tohtotalLabel, self.shatotalLabel, self.kaitotalLabel,
self.jintotalLabel, self.retsutotalLabel, self.zaitotalLabel, self.zentotalLabel
]
for i in cutnamelabels:
i.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"border-left: 1px solid black;\n"
"border-top: 1px solid black;\n"
"border-bottom: 1px solid black;"))
self.totalprogressLabels.addWidget(self.zentotalLabel)
######## Hours LCD Numbers On Total Progress Display
self.verticalLayoutWidget_4 = QtGui.QWidget(self.frame)
self.verticalLayoutWidget_4.setGeometry(QtCore.QRect(880, 80, 66, 432))
self.totalhoursLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_4)
self.totalhoursLayout.setMargin(0)
self.rintotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.rintotalhoursDisplay)
self.kyototalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.kyototalhoursDisplay)
self.tohtotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.tohtotalhoursDisplay)
self.shatotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.shatotalhoursDisplay)
self.kaitotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.kaitotalhoursDisplay)
self.jintotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.jintotalhoursDisplay)
self.retsutotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.retsutotalhoursDisplay)
self.zaitotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
self.totalhoursLayout.addWidget(self.zaitotalhoursDisplay)
self.zentotalhoursDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_4)
lcdhoursdisplays = [
self.rintotalhoursDisplay, self.kyototalhoursDisplay,self.tohtotalhoursDisplay, self.shatotalhoursDisplay,
self.kaitotalhoursDisplay, self.jintotalhoursDisplay, self.retsutotalhoursDisplay, self.zaitotalhoursDisplay,
self.zentotalhoursDisplay
]
for i in lcdhoursdisplays:
i.setNumDigits(4)
i.setStyleSheet(_fromUtf8("color: rgb(187, 204, 207);\n"
"border-top: 1px solid black;\n"
"border-bottom: 1px solid black;"))
self.totalhoursLayout.addWidget(self.zentotalhoursDisplay)
######## 'Hours' And 'Minutes' Labels On Total Progress
self.verticalLayoutWidget_5 = QtGui.QWidget(self.frame)
self.verticalLayoutWidget_5.setGeometry(QtCore.QRect(946, 80, 55, 432))
self.totalhoursLabels = QtGui.QVBoxLayout(self.verticalLayoutWidget_5)
self.totalhoursLabels.setMargin(0)
self.label_11 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_11)
self.label_14 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_14)
self.label_15 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_15)
self.label_18 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_18)
self.label_17 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_17)
self.label_16 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_16)
self.label_13 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_13)
self.label_12 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_12)
self.label_10 = QtGui.QLabel(self.verticalLayoutWidget_5)
self.totalhoursLabels.addWidget(self.label_10)
self.verticalLayoutWidget_6 = QtGui.QWidget(self.frame)
self.verticalLayoutWidget_6.setGeometry(QtCore.QRect(1000.5, 80, 66, 432))
self.totalminutesLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_6)
self.totalminutesLayout.setMargin(0)
self.rintotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.rintotalminutesDisplay)
self.kyototalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.kyototalminutesDisplay)
self.tohtotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.tohtotalminutesDisplay)
self.shatotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.shatotalminutesDisplay)
self.kaitotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.kaitotalminutesDisplay)
self.jintotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.jintotalminutesDisplay)
self.retsutotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.retsutotalminutesDisplay)
self.zaitotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.zaitotalminutesDisplay)
self.zentotalminutesDisplay = QtGui.QLCDNumber(self.verticalLayoutWidget_6)
self.totalminutesLayout.addWidget(self.zentotalminutesDisplay)
self.verticalLayoutWidget_7 = QtGui.QWidget(self.frame)
self.verticalLayoutWidget_7.setGeometry(QtCore.QRect(1064, 80, 71, 432))
self.totalminuteslabelsLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.setMargin(0)
self.label_19 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_19)
self.label_23 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_23)
self.label_22 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_22)
self.label_21 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_21)
self.label_25 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_25)
self.label_20 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_20)
self.label_26 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_26)
self.label_24 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_24)
self.label_27 = QtGui.QLabel(self.verticalLayoutWidget_7)
self.totalminuteslabelsLayout.addWidget(self.label_27)
descriptionlabelslist = [
self.label_10, self.label_11, self.label_12, self.label_13, self.label_14, self.label_15, self.label_16,
self.label_17, self.label_18, self.label_19, self.label_20, self.label_21, self.label_22, self.label_23,
self.label_24, self.label_25, self.label_26, self.label_27
]
minuteslcdlist = [
self.rintotalminutesDisplay, self.kyototalminutesDisplay, self.tohtotalminutesDisplay,
self.shatotalminutesDisplay, self.kaitotalminutesDisplay, self.jintotalminutesDisplay,
self.retsutotalminutesDisplay, self.zaitotalminutesDisplay, self.zentotalminutesDisplay
]
for i in minuteslcdlist:
i.setStyleSheet(_fromUtf8("color: rgb(187, 204, 207);\n" # Styles For LCDDisplay
"border-top: 1px solid black;\n"
"border-bottom: 1px solid black;"))
i.setNumDigits(4)
for i in descriptionlabelslist:
i.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"border-right: 1px solid black;\n"
"border-top: 1px solid black;\n"
"border-bottom: 1px solid black;\n"))
i.setAlignment(QtCore.Qt.AlignCenter)
########
self.horizontalLayoutWidget_6 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget_6.setGeometry(QtCore.QRect(790, 17, 491, 61))
self.horizontalLayoutWidget_6.setObjectName(_fromUtf8("horizontalLayoutWidget_6"))
self.horizontalLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_6)
self.horizontalLayout.setMargin(0)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.topsessionLabel = QtGui.QLabel(self.frame)
self.topsessionLabel.setGeometry(QtCore.QRect(796, 36, 335, 31))
self.topsessionLabel.setStyleSheet(_fromUtf8("color: #98A6A8;\n"
"font: 14pt \"Arial Black\";"))
self.topsessionLabel.setAlignment(QtCore.Qt.AlignCenter)
self.topsessionLabel.setObjectName(_fromUtf8("topsessionLabel"))
# self.horizontalLayout.addWidget(self.topsessionLabel)
self.horizontalLayoutWidget_4 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget_4.setGeometry(QtCore.QRect(784, 517, 361, 50))
self.horizontalLayoutWidget_4.setObjectName(_fromUtf8("horizontalLayoutWidget_4"))
self.horizontalLayout_4 = QtGui.QHBoxLayout(self.horizontalLayoutWidget_4)
self.listofsessionsButton = QtGui.QPushButton(self.horizontalLayoutWidget_6)
self.listofsessionsButton.setObjectName(_fromUtf8("pushButton"))
self.horizontalLayout_4.addWidget(self.listofsessionsButton)
self.prematureendingsbutton = QtGui.QPushButton(self.horizontalLayoutWidget_6)
self.prematureendingsbutton.setObjectName(_fromUtf8("pushButton_3"))
self.horizontalLayout_4.addWidget(self.prematureendingsbutton)
# self.horizontalLayout.addLayout(self.horizontalLayout_4)
self.horizontalLayoutWidget2 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget2.setGeometry(QtCore.QRect(30, 585, 310, 31))
self.horizontalLayoutWidget2.setObjectName(_fromUtf8("horizontalLayoutWidget"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.horizontalLayoutWidget2)
self.horizontalLayout_2.setMargin(0)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.AmbienceOption = QtGui.QCheckBox(self.horizontalLayoutWidget2)
self.AmbienceOption.setObjectName(_fromUtf8("AmbienceOption"))
self.horizontalLayout_2.addWidget(self.AmbienceOption)
self.ReferenceDisplayOption = QtGui.QCheckBox(self.horizontalLayoutWidget2)
self.ReferenceDisplayOption.setObjectName(_fromUtf8("ReferebceDisplayOption"))
self.horizontalLayout_2.addWidget(self.ReferenceDisplayOption)
self.widget = QtGui.QWidget(self.frame)
self.widget.setGeometry(QtCore.QRect(10, 760, 311, 33))
self.widget.setObjectName(_fromUtf8("widget"))
self.widget1 = QtGui.QWidget(self.frame)
self.widget1.setGeometry(QtCore.QRect(390, 580, 349, 41))
self.widget1.setObjectName(_fromUtf8("widget1"))
self.actionsbuttonsLayout = QtGui.QHBoxLayout(self.widget1)
self.actionsbuttonsLayout.setMargin(0)
self.actionsbuttonsLayout.setObjectName(_fromUtf8("actionsbuttonsLayout"))
self.CreateButton = QtGui.QPushButton(self.widget1)
self.CreateButton.setObjectName(_fromUtf8("CreateButton"))
self.actionsbuttonsLayout.addWidget(self.CreateButton)
self.exportButton = QtGui.QPushButton(self.widget1)
self.exportButton.setObjectName(_fromUtf8("exportButton"))
self.actionsbuttonsLayout.addWidget(self.exportButton)
self.sessionPlayerFrame = QtGui.QFrame(self.frame)
self.sessionPlayerFrame.setGeometry(QtCore.QRect(20, 640, 722, 165))
self.sessionPlayerFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.sessionPlayerFrame.setFrameShadow(QtGui.QFrame.Raised)
self.sessionPlayerFrame.setObjectName(_fromUtf8("sessionPlayerFrame"))
self.widget1 = QtGui.QWidget(self.sessionPlayerFrame)
self.widget1.setGeometry(QtCore.QRect(20, 40, 311, 33))
self.widget1.setObjectName(_fromUtf8("widget1"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.widget1)
self.horizontalLayout_3.setMargin(0)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.PlayButton = QtGui.QPushButton(self.widget1)
self.PlayButton.setObjectName(_fromUtf8("PlayButton"))
self.horizontalLayout_3.addWidget(self.PlayButton)
self.PauseButton = QtGui.QPushButton(self.widget1)
self.PauseButton.setObjectName(_fromUtf8("PauseButton"))
self.horizontalLayout_3.addWidget(self.PauseButton)
self.StopButton = QtGui.QPushButton(self.widget1)
self.StopButton.setObjectName(_fromUtf8("pushButton_2"))
self.horizontalLayout_3.addWidget(self.StopButton)
self.EntrainmentVolumeSlider = Phonon.VolumeSlider(self.sessionPlayerFrame)
self.EntrainmentVolumeSlider.setGeometry(QtCore.QRect(2, 120, 150, 20))
self.EntrainmentVolumeSlider.setOrientation(QtCore.Qt.Horizontal)
self.EntrainmentVolumeSlider.setObjectName(_fromUtf8("EntrainmentVolumeSlider"))
fonttop = QtGui.QFont()
fonttop.setPointSize(14)
self.sessionPlayertopLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.sessionPlayertopLabel.setFont(fonttop)
# self.sessionPlayertopLabel.setGeometry(QtCore.QRect(310, 10, 91, 16))
self.sessionPlayertopLabel.setGeometry(QtCore.QRect(2, 10, 718, 20))
self.sessionPlayertopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.sessionPlayertopLabel.setObjectName(_fromUtf8("sessionPlayertopLabel"))
self.EntrainmentVolumeTopLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.EntrainmentVolumeTopLabel.setGeometry(QtCore.QRect(30, 90, 121, 16))
self.EntrainmentVolumeTopLabel.setObjectName(_fromUtf8("EntrainmentVolumeTopLabel"))
self.AmbienceVolumeTopLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.AmbienceVolumeTopLabel.setGeometry(QtCore.QRect(220, 90, 111, 16))
self.AmbienceVolumeTopLabel.setObjectName(_fromUtf8("AmbienceVolumeTopLabel"))
self.AmbienceVolumeSlider = Phonon.VolumeSlider(self.sessionPlayerFrame)
self.AmbienceVolumeSlider.setGeometry(QtCore.QRect(186, 120, 150, 20))
self.AmbienceVolumeSlider.setOrientation(QtCore.Qt.Horizontal)
self.AmbienceVolumeSlider.setObjectName(_fromUtf8("AmbienceVolumeSlider"))
font = QtGui.QFont()
font.setPointSize(11)
self.currentlyPlayingtopLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.currentlyPlayingtopLabel.setFont(font)
self.currentlyPlayingtopLabel.setGeometry(QtCore.QRect(390, 30, 131, 21))
self.currentlyPlayingtopLabel.setObjectName(_fromUtf8("currentlyPlayingtopLabel"))
self.CutPlayingName = QtGui.QLabel(self.sessionPlayerFrame)
self.CutPlayingName.setFont(font)
self.CutPlayingName.setGeometry(QtCore.QRect(550, 30, 151, 20))
self.CutPlayingName.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.CutPlayingName.setObjectName(_fromUtf8("CutPlayingName"))
self.thiscutprogressDescriptionLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.thiscutprogressDescriptionLabel.setFont(font)
self.thiscutprogressDescriptionLabel.setGeometry(QtCore.QRect(390, 60, 171, 21))
self.thiscutprogressDescriptionLabel.setObjectName(_fromUtf8("thiscutprogressDescriptionLabel"))
self.CutPlayingProgressActual = QtGui.QLabel(self.sessionPlayerFrame)
self.CutPlayingProgressActual.setFont(font)
self.CutPlayingProgressActual.setGeometry(QtCore.QRect(540, 60, 161, 20))
self.CutPlayingProgressActual.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.CutPlayingProgressActual.setObjectName(_fromUtf8("CutPlayingProgressActual"))
self.TotalSessionProgressDescriptionLabel = QtGui.QLabel(self.sessionPlayerFrame)
self.TotalSessionProgressDescriptionLabel.setFont(font)
self.TotalSessionProgressDescriptionLabel.setGeometry(QtCore.QRect(390, 90, 181, 21))
self.TotalSessionProgressDescriptionLabel.setObjectName(_fromUtf8("TotalSessionProgressDescriptionLabel"))
self.TotalSessionPlayingProgressActual = QtGui.QLabel(self.sessionPlayerFrame)
self.TotalSessionPlayingProgressActual.setFont(font)
self.TotalSessionPlayingProgressActual.setGeometry(QtCore.QRect(540, 90, 161, 20))
self.TotalSessionPlayingProgressActual.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.TotalSessionPlayingProgressActual.setObjectName(_fromUtf8("TotalSessionPlayingProgressActual"))
self.OpenThisCutsReferenceFilesButton = QtGui.QPushButton(self.sessionPlayerFrame)
self.OpenThisCutsReferenceFilesButton.setGeometry(QtCore.QRect(534, 126, 170, 31))
self.OpenThisCutsReferenceFilesButton.setObjectName(_fromUtf8("OpenThisCutsReferenceFilesButton"))
self.goalsFrame = QtGui.QFrame(self.frame)
self.goalsFrame.setGeometry(QtCore.QRect(750, 570, 411, 234))
self.goalsFrame.setFrameShape(QtGui.QFrame.StyledPanel)
self.goalsFrame.setFrameShadow(QtGui.QFrame.Raised)
self.goalsFrame.setObjectName(_fromUtf8("goalsFrame"))
self.layoutWidget2 = QtGui.QWidget(self.goalsFrame)
self.layoutWidget2.setGeometry(QtCore.QRect(30, 50, 361, 100))
self.layoutWidget2.setObjectName(_fromUtf8("layoutWidget2"))
self.goalsVLayout = QtGui.QVBoxLayout(self.layoutWidget2)
self.goalsVLayout.setObjectName(_fromUtf8("goalsVLayout"))
self.goallabelsLayout = QtGui.QHBoxLayout()
self.goallabelsLayout.setObjectName(_fromUtf8("goallabelsLayout"))
self.currenttopLabel = QtGui.QLabel(self.layoutWidget2)
self.currenttopLabel.setStyleSheet(_fromUtf8("border-top: 1px solid black;\n"
"border-left: 1px solid black;\n"
"border-right: 1px solid black;"))
self.currenttopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.currenttopLabel.setObjectName(_fromUtf8("currenttopLabel"))
self.goallabelsLayout.addWidget(self.currenttopLabel)
self.goaltopLabel = QtGui.QLabel(self.layoutWidget2)
self.goaltopLabel.setStyleSheet(_fromUtf8("border-top: 1px solid black;\n"
"border-right: 1px solid black;\n"
"border-left: 1px solid black;"))
self.goaltopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.goaltopLabel.setObjectName(_fromUtf8("goaltopLabel"))
self.goallabelsLayout.addWidget(self.goaltopLabel)
self.goalsVLayout.addLayout(self.goallabelsLayout)
self.goalactualLayout = QtGui.QHBoxLayout()
self.goalactualLayout.setObjectName(_fromUtf8("goalactualLayout"))
self.currentgoalLabel = QtGui.QLabel(self.layoutWidget2)
self.currentgoalLabel.setMinimumSize(QtCore.QSize(100, 0))
self.currentgoalLabel.setBaseSize(QtCore.QSize(0, 0))
self.currentgoalLabel.setStyleSheet(_fromUtf8("border-left: 1px solid black;\n"
"border-bottom: 1px solid black;\n"
"border-right: 1px solid black;"))
self.currentgoalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.currentgoalLabel.setObjectName(_fromUtf8("currentgoalLabel"))
self.goalactualLayout.addWidget(self.currentgoalLabel)
self.goalLabel = QtGui.QLabel(self.layoutWidget2)
self.goalLabel.setMinimumSize(QtCore.QSize(100, 0))
self.goalLabel.setStyleSheet(_fromUtf8("border-right: 1px solid black;\n"
"border-left: 1px solid black;\n"
"border-bottom: 1px solid black;"))
self.goalLabel.setAlignment(QtCore.Qt.AlignCenter)
self.goalLabel.setObjectName(_fromUtf8("goalLabel"))
self.goalactualLayout.addWidget(self.goalLabel)
self.goalsVLayout.addLayout(self.goalactualLayout)
self.goalProgressBar = QtGui.QProgressBar(self.layoutWidget2)
self.goalProgressBar.setProperty("value", 24)
self.goalProgressBar.setObjectName(_fromUtf8("goalProgressBar"))
self.goalsVLayout.addWidget(self.goalProgressBar)
self.verticalLayoutWidget_2 = QtGui.QWidget(self.goalsFrame)
self.verticalLayoutWidget_2.setGeometry(QtCore.QRect(30, 152, 181, 81))
self.verticalLayoutWidget_2.setObjectName(_fromUtf8("verticalLayoutWidget_2"))
self.goalsButtonLayoutLeft = QtGui.QVBoxLayout(self.verticalLayoutWidget_2)
self.goalsButtonLayoutLeft.setObjectName(_fromUtf8("goalsButtonLayoutLeft"))
self.setgoalButton = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.setgoalButton.setObjectName(_fromUtf8("setgoalButton"))
self.goalsButtonLayoutLeft.addWidget(self.setgoalButton)
self.viewgoalsButton = QtGui.QPushButton(self.verticalLayoutWidget_2)
self.viewgoalsButton.setObjectName(_fromUtf8("viewgoalsButton"))
self.goalsButtonLayoutLeft.addWidget(self.viewgoalsButton)
self.verticalLayoutWidget_3 = QtGui.QWidget(self.goalsFrame)
self.verticalLayoutWidget_3.setGeometry(QtCore.QRect(210, 152, 181, 81))
self.verticalLayoutWidget_3.setObjectName(_fromUtf8("verticalLayoutWidget_3"))
self.goalsButtonLayoutRight = QtGui.QVBoxLayout(self.verticalLayoutWidget_3)
self.goalsButtonLayoutRight.setObjectName(_fromUtf8("goalsButtonLayoutRight"))
self.goalpacingButton = QtGui.QPushButton(self.verticalLayoutWidget_3)
self.goalpacingButton.setObjectName(_fromUtf8("goalpacingButton"))
self.goalsButtonLayoutRight.addWidget(self.goalpacingButton)
self.completedgoalsButton = QtGui.QPushButton(self.verticalLayoutWidget_3)
self.completedgoalsButton.setObjectName(_fromUtf8("completedgoalsButton"))
self.goalsButtonLayoutRight.addWidget(self.completedgoalsButton)
self.progresstopLabel = QtGui.QLabel(self.goalsFrame)
self.progresstopLabel.setGeometry(QtCore.QRect(60, 20, 290, 21))
self.progresstopLabel.setMinimumSize(QtCore.QSize(290, 0))
self.progresstopLabel.setAlignment(QtCore.Qt.AlignCenter)
self.progresstopLabel.setStyleSheet(_fromUtf8("font: 14pt \"Arial Black\";\n"
"color: #98A6A8;"))
self.progresstopLabel.setObjectName(_fromUtf8("progresstopLabel"))
self.statusBar = QtGui.QStatusBar(self)
self.statusBar.setObjectName(_fromUtf8("statusBar"))
self.setStatusBar(self.statusBar)
# self.statusBar.showMessage("Status Bar Is Still There")
self.menuBar = QtGui.QMenuBar(self)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1300, 21))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
self.menuBar.setStyleSheet("""
QMenuBar {background-color:#212526; color: #98A6A8;}
QMenuBar::item {background-color:#212526; color: #98A6A8; selection-color: #212526}
QMenuBar::item:selected {color: #212526; background-color: #d7801a;}
""")
self.menuFile = QtGui.QMenu(self.menuBar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuTools = QtGui.QMenu(self.menuBar)
self.menuTools.setObjectName(_fromUtf8("menuTools"))
self.menuHelp = QtGui.QMenu(self.menuBar)
self.setMenuBar(self.menuBar)
self.actionExit = QtGui.QAction(self)
self.actionExit.setObjectName(_fromUtf8("actionExit"))
self.actionExit.triggered.connect(self.close)
self.actionChange_AlertFile = QtGui.QAction(self)
self.actionChange_AlertFile.setObjectName(_fromUtf8("actionCheck_Integrity"))
self.actionChange_AlertFile.triggered.connect(self.changealertfile)
self.actionAddAmbience = QtGui.QAction(self)
self.actionAddAmbience.setObjectName(_fromUtf8("actionadd_ambience"))
self.actionAddAmbience.triggered.connect(self.addambiencefiles)
self.actionEditAmbience = QtGui.QAction(self)
self.actionEditAmbience.setObjectName(_fromUtf8("actionedit_ambience"))
self.actionEditAmbience.triggered.connect(self.editambiencefiles)
self.actionEditReferenceFiles = QtGui.QAction(self)
self.actionEditReferenceFiles.triggered.connect(self.editreferencefiles)
self.actionHowToUseThisProgram = QAction(self)
self.actionHowToUseThisProgram.triggered.connect(self.howtousethisprogram)
self.actionAboutThisProgram = QtGui.QAction(self)
self.actionAboutThisProgram.triggered.connect(self.aboutthisprogram)
self.actioncontactMe = QtGui.QAction(self)
self.actioncontactMe.triggered.connect(self.contactme)
self.menuFile.addAction(self.actionExit)
self.menuTools.addAction(self.actionAddAmbience)
self.menuTools.addAction(self.actionEditAmbience)
self.menuTools.addAction(self.actionChange_AlertFile)
self.menuTools.addAction(self.actionEditReferenceFiles)
self.menuHelp.addAction(self.actionHowToUseThisProgram)
self.menuHelp.addAction(self.actionAboutThisProgram)
self.menuHelp.addAction(self.actioncontactMe)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuTools.menuAction())
self.menuBar.addAction(self.menuHelp.menuAction())
QtCore.QObject.connect(self.PlayButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.statusBar.clearMessage)
self.horizontalLayoutWidget_8 = QtGui.QWidget(self.frame)
self.horizontalLayoutWidget_8.setGeometry(QtCore.QRect(250, 520, 271, 51))
self.horizontalLayoutWidget_8.setObjectName(_fromUtf8("horizontalLayoutWidget_8"))
self.totalsessiondisplayLayout = QtGui.QHBoxLayout(self.horizontalLayoutWidget_8)
self.totalsessiondisplayLayout.setMargin(0)
self.totalsessiondisplayLayout.setObjectName(_fromUtf8("totalsessiondisplayLayout"))
self.totalhoursDisplay = QtGui.QLCDNumber(self.horizontalLayoutWidget_8)
self.totalhoursDisplay.setObjectName(_fromUtf8("totalhoursDisplay"))
self.totalsessiondisplayLayout.addWidget(self.totalhoursDisplay)
self.totalhoursLabel = QtGui.QLabel(self.horizontalLayoutWidget_8)
self.totalhoursLabel.setObjectName(_fromUtf8("totalhoursLabel"))
self.totalsessiondisplayLayout.addWidget(self.totalhoursLabel)
self.totalMinutesDisplay = QtGui.QLCDNumber(self.horizontalLayoutWidget_8)
self.totalMinutesDisplay.setObjectName(_fromUtf8("totalMinutesDisplay"))
self.totalsessiondisplayLayout.addWidget(self.totalMinutesDisplay)
self.totalminutesLabel = QtGui.QLabel(self.horizontalLayoutWidget_8)
self.totalminutesLabel.setObjectName(_fromUtf8("totalminutesLabel"))
self.totalsessiondisplayLayout.addWidget(self.totalminutesLabel)
self.calculateTotalSessionTimeButton = QtGui.QPushButton(self.frame)
self.calculateTotalSessionTimeButton.setGeometry(QtCore.QRect(540, 530, 201, 23))
self.calculateTotalSessionTimeButton.setObjectName(_fromUtf8("calculateTotalSessionTimeButton"))
self.changeallvaluesButton = QtGui.QPushButton(self.frame)
self.changeallvaluesButton.setGeometry(QtCore.QRect(30, 530, 201, 23))
self.changeallvaluesButton.setObjectName(_fromUtf8("changeallvaluesButton"))
self.setWindowTitle("MainWindow")
self.preLabel_2.setText("PRE")
self.rinLabel_2.setText("RIN")
self.kyoLabel_2.setText("KYO")
self.tohLabel_2.setText("TOH")
self.shaLabel_2.setText("SHA")
self.kaiLabel_2.setText("KAI")
self.jinLabel_2.setText("JIN")
self.retsuLabel_2.setText("RETSU")
self.zaiLabel_2.setText("ZAI")
self.zenLabel_2.setText("ZEN")
self.postLabel_2.setText("POST")
self.toptotalsLabel.setText("Create New Session")
self.rintotalLabel.setText("RIN")
self.kyototalLabel.setText("KYO")
self.tohtotalLabel.setText("TOH")
self.shatotalLabel.setText("SHA")
self.kaitotalLabel.setText("KAI")
self.jintotalLabel.setText("JIN")
self.retsutotalLabel.setText("RETSU")
self.zaitotalLabel.setText("ZAI")
self.zentotalLabel.setText("ZEN")
self.label_11.setText("Hours")
self.label_14.setText("Hours")
self.label_15.setText("Hours")
self.label_18.setText("Hours")
self.label_17.setText("Hours")
self.label_16.setText("Hours")
self.label_13.setText("Hours")
self.label_12.setText("Hours")
self.label_10.setText("Hours")
self.label_19.setText("Minutes")
self.label_23.setText("Minutes")
self.label_22.setText("Minutes")
self.label_21.setText("Minutes")
self.label_25.setText("Minutes")
self.label_20.setText("Minutes")
self.label_26.setText("Minutes")
self.label_24.setText("Minutes")
self.label_27.setText("Minutes")
self.changeallvaluesButton.setText("Change All Values To...")
self.topsessionLabel.setText("Total Progress")
self.listofsessionsButton.setText("View Practiced Sessions")
self.prematureendingsbutton.setText("View Premature Endings")
self.AmbienceOption.setText("Ambience In Session?")
self.ReferenceDisplayOption.setText("Display Reference?")
self.CreateButton.setText("Create Session")
self.exportButton.setText("Export Session")
self.PlayButton.setText("Play")
self.PauseButton.setText("Pause")
self.StopButton.setText("Stop")
self.sessionPlayertopLabel.setText("Session Player")
self.EntrainmentVolumeTopLabel.setText("Entrainment Volume")
self.AmbienceVolumeTopLabel.setText("Ambience Volume")
self.currentlyPlayingtopLabel.setText("Currently Playing:")
self.CutPlayingName.setText("No Session Playing")
self.thiscutprogressDescriptionLabel.setText("Progress:")
self.CutPlayingProgressActual.setText("No Session Playing")
self.TotalSessionProgressDescriptionLabel.setText("Total Session Progress:")
self.TotalSessionPlayingProgressActual.setText("No Session Playing")
self.OpenThisCutsReferenceFilesButton.setText("No Cut Playing")
self.setgoalButton.setText("New Goal")
self.goalpacingButton.setText("Goal Pacing")
self.viewgoalsButton.setText("View Goals")
self.completedgoalsButton.setText("Completed Goals")
self.totalminutesLabel.setText("Minutes")
self.totalhoursLabel.setText("Hours")
self.calculateTotalSessionTimeButton.setText("Calculate Total Session Time")
self.currenttopLabel.setText("Current")
self.goaltopLabel.setText("Goal")
self.progresstopLabel.setText("Goal Progress")
self.setgoalstatus()
if not self.sessiondb.goalsset:
self.goalProgressBar.setValue(0)
self.currentgoalLabel.setText("No Goal Set")
self.goalLabel.setText("No Goal Set")
self.menuFile.setTitle("File")
self.menuTools.setTitle("Tools")
self.menuHelp.setTitle("Help")
self.actionExit.setText("Exit")
self.actionChange_AlertFile.setText("Change Alert File")
self.actionAddAmbience.setText("Add Ambience To Program")
self.actionEditAmbience.setText("Edit Program's Ambience")
self.actionEditReferenceFiles.setText("Edit Reference Files")
self.actionAboutThisProgram.setText("About")
self.actionHowToUseThisProgram.setText("Tutorials")
self.actioncontactMe.setText("Contact Me")
self.slidervalues = [self.preSlider_2, self.rinSlider_2, self.kyoSlider_2, self.tohSlider_2, self.shaSlider_2,
self.kaiSlider_2,
self.jinSlider_2, self.retsuSlider_2, self.zaiSlider_2, self.zenSlider_2,
self.postSlider_2]
self.setsignalsandslots()
    def changealertfile(self):
        """Menu handler (Tools > Change Alert File): delegate to the
        Tools module's alert-file change workflow.

        NOTE(review): Tools.ChangeAlertFile is defined elsewhere in the
        project; presumably it opens a dialog to pick a new alert sound —
        confirm against the Tools module.
        """
        Tools.ChangeAlertFile(self)
    def editreferencefiles(self):
        """Menu handler (Tools > Edit Reference Files): delegate to the
        Reference module's editing workflow.

        NOTE(review): Reference.EditReferenceFiles is defined elsewhere in
        the project; this method only forwards the main window as parent.
        """
        Reference.EditReferenceFiles(self)
    def addambiencefiles(self):
        """Menu handler (Tools > Add Ambience To Program): delegate to the
        Tools module's add-ambience workflow.

        NOTE(review): Tools.AddAmbienceFiles is defined elsewhere in the
        project; this method only forwards the main window as parent.
        """
        Tools.AddAmbienceFiles(self)
    def editambiencefiles(self):
        """Menu handler (Tools > Edit Program's Ambience): delegate to the
        Tools module's edit-ambience workflow.

        NOTE(review): Tools.EditAmbienceFiles is defined elsewhere in the
        project; this method only forwards the main window as parent.
        """
        Tools.EditAmbienceFiles(self)
def contactme(self):
self.contactmedialog = QDialog(self)
self.contactmedialog.resize(387, 206)
self.contactmetextBrowser = QtGui.QTextBrowser(self.contactmedialog)
self.contactmetextBrowser.setGeometry(QtCore.QRect(10, 10, 371, 161))
self.contactmeOKButton = QtGui.QPushButton(self.contactmedialog)
self.contactmeOKButton.setGeometry(QtCore.QRect(300, 180, 80, 23))
self.contactmedialog.setWindowTitle("Contact Me")
self.contactmetextBrowser.setHtml(
"<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-family:\'Arial\'; font-size:9pt;\">This program has been a project for me for a couple of years now, and it\'s grown from a basic terminal application to a full-fledged program (now over 4,000 lines of code). Seeing as I\'m the sole developer, chances are, I may have made a few mistakes, and sometimes programs don\'t work as expected. If this happens, please attach your log.txt file (in the program\'s directory) and shoot me an email at [email protected]. I\'ll do my best to get back to you, and resolve the problem ASAP.</span></p></body></html>")
self.contactmeOKButton.setText("OK")
QtCore.QObject.connect(self.contactmeOKButton, QtCore.SIGNAL("clicked()"), self.contactmedialog.accept)
self.contactmedialog.exec_()
| 64.997498 | 711 | 0.702702 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.