blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
218d3ed1c58ad7f4bfc7b1ce49d8780eb94c0a57 | ab8a34e5b821dde7b09abe37c838de046846484e | /twilio/sample-code-master/api/v2010/authorized_connect_app/fetch-default/fetch-default.6.x.py | 511f053b487454f45e009737f2279a5ad002bea5 | [] | no_license | sekharfly/twilio | 492b599fff62618437c87e05a6c201d6de94527a | a2847e4c79f9fbf5c53f25c8224deb11048fe94b | refs/heads/master | 2020-03-29T08:39:00.079997 | 2018-09-21T07:20:24 | 2018-09-21T07:20:24 | 149,721,431 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | # Download the helper library from https://www.twilio.com/docs/python/install
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# (placeholders below must be replaced with real credentials before running)
account_sid = 'ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
auth_token = 'your_auth_token'
# Authenticated REST client for the Twilio API.
client = Client(account_sid, auth_token)
# Fetch a single authorized Connect App by its CN... SID.
authorized_connect_app = client \
    .authorized_connect_apps('CNXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
    .fetch()
# Print the company name of the app that was authorized.
print(authorized_connect_app.connect_app_company_name)
| [
"[email protected]"
] | |
70b09e5245552f40904f0ac5b1dabf0e8ef879b3 | 341bd2d71b6b6e3af734f16989aeb450e3e73624 | /HMC6343A.py | 5c8c749d819dc0cab48d0b18bb3f51cb5f52419b | [] | no_license | ControlEverythingCommunity/CE_PYTHON_LIB | 5c170f7e3763ab3b160a5fc33f2bb96d4798c7e2 | 736b29434a451a384c2f52490c849239c3190951 | refs/heads/master | 2021-01-12T00:39:25.374689 | 2017-08-30T21:54:47 | 2017-08-30T21:54:47 | 78,751,564 | 7 | 7 | null | 2017-12-15T11:08:48 | 2017-01-12T14:05:11 | Python | UTF-8 | Python | false | false | 4,911 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC6343A
# This code is designed to work with the HMC6343A_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Accelorometer?sku=HMC6343A_I2CS#tabs-0-product_tabset-2
import smbus
import time
# Get I2C bus
bus = smbus.SMBus(1)
# I2C address of the device
HMC6343A_DEFAULT_ADDRESS = 0x19
# HMC6343A Register Map
HMC6343A_SW_VERSION = 0x02 # Software Version Number
HMC6343A_OP_MODE1 = 0x04 # Operational Mode Register 1
HMC6343A_OP_MODE2 = 0x05 # Operational Mode Register 2
HMC6343A_SN_LSB = 0x06 # Device Serial Number LSB
HMC6343A_SN_MSB = 0x07 # Device Serial Number MSB
HMC6343A_DEVIATION_LSB = 0x0A # Deviation Angle (+/-1800) in tenths of a degree LSB
HMC6343A_DEVIATION_MSB = 0x0B # Deviation Angle (+/-1800) in tenths of a degree MSB
HMC6343A_VARIATION_LSB = 0x0C # Variation Angle (+/-1800) in tenths of a degree LSB
HMC6343A_VARIATION_MSB = 0x0D # Variation Angle (+/-1800) in tenths of a degree MSB
HMC6343A_XOFFSET_LSB = 0x0E # Hard-Iron Calibration Offset for the X-axis LSB
HMC6343A_XOFFSET_MSB = 0x0F # Hard-Iron Calibration Offset for the X-axis MSB
HMC6343A_YOFFSET_LSB = 0x10 # Hard-Iron Calibration Offset for the Y-axis LSB
HMC6343A_YOFFSET_MSB = 0x11 # Hard-Iron Calibration Offset for the Y-axis MSB
HMC6343A_ZOFFSET_LSB = 0x12 # Hard-Iron Calibration Offset for the Z-axis LSB
HMC6343A_ZOFFSET_MSB = 0x13 # Hard-Iron Calibration Offset for the Z-axis MSB
HMC6343A_FILTER_LSB = 0x14 # Heading IIR Filter LSB
HMC6343A_FILTER_MSB = 0x15 # Heading IIR Filter MSB
HMC6343A_POST_ACCEL = 0x40 # Post Accel Data
HMC6343A_POST_MAG = 0x45 # Post Mag Data
# HMC6343A Operational Mode Register-1 Configuration
HMC6343A_OM1_LEVEL = 0x01 # Level Orientation Set
HMC6343A_OM1_UE = 0x02 # Upright Edge Orientation Set
HMC6343A_OM1_UF = 0x04 # Upright Front Orientation Set
HMC6343A_OM1_STDBY = 0x08 # Stand-by Mode Set
HMC6343A_OM1_RUN = 0x10 # Run Mode Set
HMC6343A_OM1_FILTER = 0x20 # IIR Heading Filter Used
HMC6343A_OM1_CAL = 0x40 # Calculating calibration offsets
HMC6343A_OM1_COMP = 0x80 # Calculating compass data
# HMC6343A Operational Mode Register-2 Configuration
HMC6343A_MR_1 = 0x00 # Measurement Rate = 1Hz
HMC6343A_MR_5 = 0x01 # Measurement Rate = 5Hz
HMC6343A_MR_10 = 0x02 # Measurement Rate = 10Hz
class HMC6343A():
	"""Driver for the Honeywell HMC6343A tilt-compensated compass module.

	Configures the device over I2C on construction (using the module-level
	``bus`` and register constants) and exposes raw signed accelerometer
	and magnetometer readings.
	"""
	def __init__(self):
		# Apply the default operating mode and measurement rate on start-up.
		self.mode_config()
		self.measurement_rate_config()

	def mode_config(self):
		"""Select the Operational Mode Register-1 Configuration from the given provided values"""
		MODE_CONFIG = (HMC6343A_OM1_LEVEL | HMC6343A_OM1_RUN)
		bus.write_byte_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_OP_MODE1, MODE_CONFIG)

	def measurement_rate_config(self):
		"""Select the Operational Mode Register-2 Configuration from the given provided values"""
		bus.write_byte_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_OP_MODE2, HMC6343A_MR_5)

	@staticmethod
	def _to_signed(msb, lsb):
		"""Combine two register bytes into a signed 16-bit two's-complement value.

		Replaces six identical copies of this conversion that used to live
		inline in read_accl()/read_mag().
		"""
		value = msb * 256 + lsb
		if value > 32767:
			value -= 65536
		return value

	def read_accl(self):
		"""Read data back from HMC6343A_POST_ACCEL(0x40), 6 bytes
		X-Axis Accl MSB, X-Axis Accl LSB, Y-Axis Accl MSB, Y-Axis Accl LSB, Z-Axis Accl MSB, Z-Axis Accl LSB

		Returns a dict with signed raw readings for keys 'x', 'y', 'z'.
		"""
		data = bus.read_i2c_block_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_POST_ACCEL, 6)
		# Convert each MSB/LSB pair into a signed axis value.
		return {'x': self._to_signed(data[0], data[1]),
				'y': self._to_signed(data[2], data[3]),
				'z': self._to_signed(data[4], data[5])}

	def read_mag(self):
		"""Read data back from HMC6343A_POST_MAG(0x45), 6 bytes
		X-Axis Mag MSB, X-Axis Mag LSB, Y-Axis Mag MSB, Y-Axis Mag LSB, Z-Axis Mag MSB, Z-Axis Mag LSB

		Returns a dict with signed raw readings for keys 'x', 'y', 'z'.
		"""
		data = bus.read_i2c_block_data(HMC6343A_DEFAULT_ADDRESS, HMC6343A_POST_MAG, 6)
		return {'x': self._to_signed(data[0], data[1]),
				'y': self._to_signed(data[2], data[3]),
				'z': self._to_signed(data[4], data[5])}
# Demo loop: reconfigure the sensor, then print accelerometer and
# magnetometer readings once per second.
#
# Fix: the original used Python-2-only "print ..." statements, which are a
# SyntaxError under Python 3.  print() with a single pre-formatted string
# argument behaves identically as a Python 2 statement (parenthesized
# expression) and as a Python 3 function call.
from HMC6343A import HMC6343A
hmc6343a = HMC6343A()
while True :
    hmc6343a.mode_config()
    hmc6343a.measurement_rate_config()
    time.sleep(0.1)
    accl = hmc6343a.read_accl()
    print("Acceleration in X-Axis : %d" % (accl['x']))
    print("Acceleration in Y-Axis : %d" % (accl['y']))
    print("Acceleration in Z-Axis : %d" % (accl['z']))
    mag = hmc6343a.read_mag()
    print("Magnetic field in X-Axis : %d" % (mag['x']))
    print("Magnetic field in Y-Axis : %d" % (mag['y']))
    print("Magnetic field in Z-Axis : %d" % (mag['z']))
    print(" ************************************* ")
    time.sleep(1)
| [
"[email protected]"
] | |
0615cccaa29d7378c2edef98d3e1ab29fa9a44ba | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/HanoiDocumentsGetRequest.py | a8388d7a45fc9eab1cb0d78258a5e7a11749c1d2 | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class HanoiDocumentsGetRequest(RestApi):
    """Auto-generated TOP SDK wrapper for the taobao.hanoi.documents.get API."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # Optional request parameters; populated by the caller before dispatch.
        self.code = None
        self.current_page = None
        self.id = None
        self.name = None
        self.page_size = None

    def getapiname(self):
        """Return the TOP API method name used by the RestApi base class."""
        return 'taobao.hanoi.documents.get'
| [
"[email protected]"
] | |
6fb4f0d13f849eb1b5e059fec7712d6d8c2ea181 | 8647d911c5a1b765f363c6d904923bfdd9035276 | /4.py | 0d2993fae7b0e616700256e8c21d30b624b7512f | [] | no_license | Thilagaa22/pythonsql | 693f334405d074deffb3dded33be32c2d6a05a49 | 41ef9ffac38adf9501be88e8d6881bc10ebaf37d | refs/heads/master | 2020-05-22T22:43:45.410818 | 2019-05-14T08:45:31 | 2019-05-14T08:45:31 | 186,551,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | s = input()
print(s+'.')
| [
"[email protected]"
] | |
1d56cb579b4790f733317d1a79f0a8d3c8cd7f52 | 1fdc846f4e5b7bda56e8740b859c8340d9b5141a | /tests/test_input_biot_npbc_lagrange.py | cc685e4db4201cb225b7adde4304c90956861222 | [
"BSD-3-Clause"
] | permissive | olivierverdier/sfepy | b824fdab7d91e137a371c277901fbb807b316b02 | 83aefb7b33ea17f4acb83388ba8bc7314c77616c | refs/heads/master | 2021-01-18T05:39:13.127137 | 2010-10-25T13:13:18 | 2010-10-25T17:31:37 | 1,022,869 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | input_name = '../examples/biot/biot_npbc_lagrange.py'
output_name = 'test_biot_npbc_lagrange.vtk'
from testsBasic import TestInput
class Test(TestInput):
    # All behaviour comes from the generic TestInput machinery; the
    # module-level input_name/output_name tell it which example to run
    # and where to write the result file.
    pass
| [
"[email protected]"
] | |
e1eca558e5cbce67053dce1a670a4aa3069896cd | ff6248be9573caec94bea0fa2b1e4b6bf0aa682b | /StudentProblem/10.21.12.28/5/1569571946.py | af429088f6ad955c7b65295f7f9a893d0bbe5fc1 | [] | no_license | LennartElbe/codeEvo | 0e41b1a7705204e934ef71a5a28c047366c10f71 | e89b329bc9edd37d5d9986f07ca8a63d50686882 | refs/heads/master | 2020-12-21T17:28:25.150352 | 2020-03-26T10:22:35 | 2020-03-26T10:22:35 | 236,498,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | import functools
import typing
import string
import random
import pytest
## Lösung Teil 1.
def mysum(xs: typing.Iterable) -> int:
    """Return the sum of the numbers in *xs* (0 for an empty iterable).

    Generalized: sum() accepts any iterable, so the annotation no longer
    over-restricts the parameter to list.
    """
    return sum(xs)
## Lösung Teil 2. (Tests)
def test_mysum():
    """Unit test for mysum; now also covers empty and negative inputs."""
    assert mysum([1,2,3]) == 6
    assert mysum([]) == 0
    assert mysum([-1, 1]) == 0
######################################################################
| [
"[email protected]"
] | |
e83344bc5686876da9d4a28fdd9a32c2540bccf5 | a7b5adc5f72b9ef71c0c71691492f8af8a32c868 | /Minseo-Kim/leetcode/206_Reverse_linked_list.py | 73148fa6b52f9be00ce91fc881e9ae477ff1fd88 | [] | no_license | mintheon/Practice-Algorithm | 535ff607e36d1bfa9f800a28091a52c48748221c | 3a653a1d8cc6e1438cab47a427ccd0b421a10010 | refs/heads/master | 2023-04-10T17:43:10.434210 | 2021-04-10T18:46:26 | 2021-04-10T18:46:26 | 347,719,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# Time complexity: O(n), ** Space complexity: O(1)**
# Runtime: 32 ms, faster than 87.13% of Python3
# Memory Usage: 15.5 MB, less than 93.25% of Python3
class Solution:
    def reverseList(self, head: ListNode) -> ListNode:
        """Reverse a singly linked list iteratively.

        O(n) time, O(1) extra space: walk the list once, re-pointing each
        node's next link at the already-reversed prefix.
        """
        reversed_head = None
        current = head
        while current:
            following = current.next       # remember the rest of the list
            current.next = reversed_head   # hook current onto the reversed prefix
            reversed_head = current        # reversed prefix now starts here
            current = following            # advance into the unprocessed suffix
        return reversed_head
"[email protected]"
] | |
3f53f5573681c9ce0fc51b144d84850bc70bcfb1 | 7a87b2701dedeab6ad0c237feebdb3434d28231e | /ui_framework/page/index_page.py | 623efd36b370587149fe71e05467bb3cc8843856 | [] | no_license | tingyu-ui/test_dwpytest | 002a81f897b61c1e593d0f07f973b8380a725f9c | 4765ed74b64582453ddce6e318aa626049b133e8 | refs/heads/master | 2023-08-28T02:25:25.814423 | 2021-10-06T17:58:00 | 2021-10-06T17:59:02 | 375,302,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#雪球首页page
#可以直接继承basepage,调用已经封装好的UI操作
import yaml
from ui_framework.base_page import BasePage
class IndexPage(BasePage):
    """Page object for the Xueqiu (Snowball) app home page."""
    def goto_market(self):
        # "xpath" here is equivalent to By.xpath.
        # Direct-call alternative (kept for reference):
        # self.find("xpath", "//*[@text='行情']").click()
        # print(data)
        # data format: {'-action': 'click', 'by': 'xpath', 'value': "//*[@text='行情']"}}
        # step-list shape: function name: [{'action': ,'by': ,'value':, {}, {}}]
        # run_steps loads the YAML file and executes every step stored
        # under the given key against the inherited BasePage helpers.
        # steps = data.get("goto_market")
        self.run_steps("../page/index_page.yaml", "goto_market")
| [
"[email protected]"
] | |
11f67c0308b87d360a68ff26de26d4697538d0de | 0b842bcb3bf20e1ce628d39bf7e11abd7699baf9 | /sql/register_sql.py | 9fac3d26fe0e079d1338a20b332cb6855862c0c9 | [] | no_license | afeset/miner2-tools | 75cc8cdee06222e0d81e39a34f621399e1ceadee | 81bcc74fe7c0ca036ec483f634d7be0bab19a6d0 | refs/heads/master | 2016-09-05T12:50:58.228698 | 2013-08-27T21:09:56 | 2013-08-27T21:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | '''
Created on Aug 14, 2013
@author: asaf
'''
import sys
# Make the miner framework and the local tools checkout importable.
sys.path.append("/home/asaf/miner2.0")
import miner_globals
sys.path.append("/home/asaf/miner2-tools")
from sql import sql_target
# Register the "sql" output target so the miner framework dispatches
# "Write To SQL DB" operations to sql_target.sqlDump.
miner_globals.addTargetToClassMapping("sql",None,"sql_target.sqlDump","Write To SQL DB")
| [
"[email protected]"
] | |
7808b767d59251d668304489971025bb095ee8da | b207f16445f98cdf18ce4eb06ca8c1b292f7a096 | /mysite/urls.py | 149018110f85268095b7a27c3ed9a664fd15aeec | [] | no_license | Hugekyung/rest-board | 653adac144244eddab65edf9754061b1046df182 | 038fb71b8cccc91e828b29025b40a15d249bb41a | refs/heads/main | 2023-02-24T16:45:48.320337 | 2021-01-27T14:08:27 | 2021-01-27T14:08:27 | 331,324,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin interface
    path('', include('post_service.urls')),  # board app routes served at the site root
]
| [
"[email protected]"
] | |
841815712a69ec53210e65b2409387ca65d4c2ba | 50a00c194afddf2298504605f8a3dc1e1f854721 | /sqlparse1/engine/grouping.py | e4f0cbf794843a12f84c397700779cc511b450d1 | [] | no_license | CudaText-addons/cuda_fmt_sql_uroboro | ee57f8d600f4f838458519eb6a89a0690b6687c7 | bbebcfee9ecfe3c317f62a1c8259b58f1da5b9c2 | refs/heads/master | 2020-06-18T21:38:40.678645 | 2019-07-11T21:25:46 | 2019-07-11T21:25:46 | 196,459,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,236 | py | # -*- coding: utf-8 -*-
import itertools
from sqlparse1 import sql
from sqlparse1 import tokens as T
import sys
try:
next
except NameError: # Python < 2.6
next = lambda i: i.__next__()
def _group_left_right(tlist, ttype, value, cls,
                      check_right=lambda t: True,
                      check_left=lambda t: True,
                      include_semicolon=False):
    """Group each token matching (*ttype*, *value*) with its neighbours.

    For every match in *tlist* whose immediate left/right neighbours pass
    *check_left*/*check_right*, the left neighbour, the match and the
    right neighbour are folded into a single *cls* group.  With
    *include_semicolon* a trailing ';' is pulled into the group as well.
    """
    # Recurse into subgroups first, skipping ones that are already *cls*.
    [_group_left_right(sgroup, ttype, value, cls, check_right, check_left,
                       include_semicolon) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, cls)]
    idx = 0
    token = tlist.token_next_match(idx, ttype, value)
    while token:
        right = tlist.token_next(tlist.token_index(token))
        left = tlist.token_prev(tlist.token_index(token))
        if right is None or not check_right(right):
            # No valid right-hand operand: skip past this match.
            token = tlist.token_next_match(tlist.token_index(token) + 1,
                                           ttype, value)
        elif left is None or not check_left(left):
            # No valid left-hand operand: skip past this match.
            token = tlist.token_next_match(tlist.token_index(token) + 1,
                                           ttype, value)
        else:
            if include_semicolon:
                sright = tlist.token_next_match(tlist.token_index(right),
                                                T.Punctuation, ';')
                if sright is not None:
                    # only overwrite "right" if a semicolon is actually
                    # present.
                    right = sright
            # Everything after "left" up to "right" gets folded into the
            # group ("left" itself becomes, or already is, the group).
            tokens = tlist.tokens_between(left, right)[1:]
            if not isinstance(left, cls):
                # Wrap "left" in a fresh *cls* group, in place.
                new = cls([left])
                new_idx = tlist.token_index(left)
                tlist.tokens.remove(left)
                tlist.tokens.insert(new_idx, new)
                left = new
            left.tokens.extend(tokens)
            for t in tokens:
                tlist.tokens.remove(t)
            token = tlist.token_next_match(tlist.token_index(left) + 1,
                                           ttype, value)
def _find_matching(idx, tlist, start_ttype, start_value, end_ttype, end_value):
depth = 1
for tok in tlist.tokens[idx:]:
if tok.match(start_ttype, start_value):
depth += 1
elif tok.match(end_ttype, end_value):
depth -= 1
if depth == 1:
return tok
return None
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
                    cls, include_semicolon=False, recurse=False):
    """Group everything between matching open/close tokens into *cls*.

    E.g. CASE ... END, IF ... END IF.  Nested occurrences inside a freshly
    created group are handled by the recursive call below; *recurse*
    additionally descends into pre-existing sublists.
    """
    #bugfix recurse
    # [_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
    #                 cls, include_semicolon) for sgroup in tlist.get_sublists()
    [_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
                     cls, include_semicolon, recurse) for sgroup in tlist.get_sublists()
     if recurse]
    # When *tlist* itself is such a group, skip its own opening token.
    if isinstance(tlist, cls):
        idx = 1
    else:
        idx = 0
    token = tlist.token_next_match(idx, start_ttype, start_value)
    while token:
        tidx = tlist.token_index(token)
        end = _find_matching(tidx, tlist, start_ttype, start_value,
                             end_ttype, end_value)
        if end is None:
            # Unbalanced opener: leave it ungrouped and move on.
            idx = tidx + 1
        else:
            if include_semicolon:
                next_ = tlist.token_next(tlist.token_index(end))
                if next_ and next_.match(T.Punctuation, ';'):
                    end = next_
            group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
            # Handle constructs nested inside the new group.
            _group_matching(group, start_ttype, start_value,
                            end_ttype, end_value, cls, include_semicolon)
            idx = tlist.token_index(group) + 1
        token = tlist.token_next_match(idx, start_ttype, start_value)
def group_if(tlist):
    # Procedural IF ... END IF blocks (semicolon included).
    _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', sql.If, True)
def group_for(tlist):
    # FOR ... END LOOP blocks.
    _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP',
                    sql.For, True)
def group_foreach(tlist):
    # FOREACH ... END LOOP blocks (e.g. PL/pgSQL).
    _group_matching(tlist, T.Keyword, 'FOREACH', T.Keyword, 'END LOOP',
                    sql.For, True)
def group_begin(tlist):
    # BEGIN ... END blocks.
    _group_matching(tlist, T.Keyword, 'BEGIN', T.Keyword, 'END',
                    sql.Begin, True)
def group_as(tlist):
    """Group "<expr> AS <alias>" constructs into Identifier groups."""
    def _right_valid(token):
        # Currently limited to DML/DDL. Maybe additional more non SQL reserved
        # keywords should appear here (see issue8).
        return not token.ttype in (T.DML, T.DDL)
    def _left_valid(token):
        # NULL may legitimately appear on the left-hand side of AS.
        if token.ttype is T.Keyword and token.value in ('NULL',):
            return True
        return token.ttype is not T.Keyword
    _group_left_right(tlist, T.Keyword, 'AS', sql.Identifier,
                      check_right=_right_valid,
                      check_left=_left_valid)
def group_assignment(tlist):
    # "target := value;" assignments (procedural SQL).
    _group_left_right(tlist, T.Assignment, ':=', sql.Assignment,
                      include_semicolon=True)
def group_comparison(tlist):
    """Group "<operand> <comparison-op> <operand>" into Comparison groups."""
    def _parts_valid(token):
        # Operands may be literals, names, placeholders, sub-groups or NULL.
        return (token.ttype in (T.String.Symbol, T.String.Single,
                                T.Name, T.Number, T.Number.Float,
                                T.Number.Integer, T.Literal,
                                T.Literal.Number.Integer, T.Name.Placeholder)
                or isinstance(token, (sql.Identifier, sql.Parenthesis))
                or (token.ttype is T.Keyword
                    and token.value.upper() in ['NULL', ]))
    _group_left_right(tlist, T.Operator.Comparison, None, sql.Comparison,
                      check_left=_parts_valid, check_right=_parts_valid)
def group_case(tlist):
    # CASE ... END expressions; recurse=True also descends into sublists.
    _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', sql.Case,
                    include_semicolon=True, recurse=True)
def group_identifier(tlist):
    """Group dotted/qualified name sequences (a.b, tbl.*, f(x)[1], ...)
    into Identifier groups."""
    def _consume_cycle(tl, i):
        """Yield tokens continuing an identifier, alternating between
        "connector" tokens (dots, operators) and "name-like" tokens."""
        # TODO: Usage of Wildcard token is ambivalent here.
        x = itertools.cycle((
            lambda y: (y.match(T.Punctuation, '.')
                       or y.ttype in (T.Operator,
                                      T.Wildcard,
                                      T.Name)
                       or isinstance(y, sql.SquareBrackets)),
            lambda y: (y.ttype in (T.String.Symbol,
                                   T.Name,
                                   T.Wildcard,
                                   T.Literal.String.Single,
                                   T.Literal.Number.Integer,
                                   T.Literal.Number.Float)
                       or isinstance(y, (sql.Parenthesis,
                                         sql.SquareBrackets,
                                         sql.Function)))))
        for t in tl.tokens[i:]:
            # Don't take whitespaces into account.
            if t.ttype is T.Whitespace:
                yield t
                continue
            if next(x)(t):
                yield t
            else:
                # A trailing multiline comment still belongs to the
                # identifier; anything else ends it.
                if isinstance(t, sql.Comment) and t.is_multiline():
                    yield t
                return
    def _next_token(tl, i):
        """Return the next token that can start an identifier."""
        # chooses the next token. if two tokens are found then the
        # first is returned.
        t1 = tl.token_next_by_type(
            i, (T.String.Symbol, T.Name, T.Literal.Number.Integer,
                T.Literal.Number.Float))
        i1 = tl.token_index(t1, start=i) if t1 else None
        t2_end = None if i1 is None else i1 + 1
        t2 = tl.token_next_by_instance(i, (sql.Function, sql.Parenthesis), end=t2_end)
        if t1 and t2:
            i2 = tl.token_index(t2, start=i)
            if i1 > i2:
                return t2
            else:
                return t1
        elif t1:
            return t1
        else:
            return t2
    # bottom up approach: group subgroups first
    [group_identifier(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.Identifier)]
    # real processing
    idx = 0
    token = _next_token(tlist, idx)
    while token:
        identifier_tokens = [token] + list(
            _consume_cycle(tlist,
                           tlist.token_index(token, start=idx) + 1))
        # remove trailing whitespace
        if identifier_tokens and identifier_tokens[-1].ttype is T.Whitespace:
            identifier_tokens = identifier_tokens[:-1]
        # A lone function/parenthesis/number is not an identifier by itself.
        if not (len(identifier_tokens) == 1
                and (isinstance(identifier_tokens[0], (sql.Function, sql.Parenthesis))
                     or identifier_tokens[0].ttype in (T.Literal.Number.Integer,
                                                       T.Literal.Number.Float))):
            group = tlist.group_tokens(sql.Identifier, identifier_tokens)
            idx = tlist.token_index(group, start=idx) + 1
        else:
            idx += 1
        token = _next_token(tlist, idx)
def group_identifier_list(tlist):
    """Group comma-separated runs of identifier-like tokens into
    IdentifierList groups (e.g. select-column or argument lists)."""
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.IdentifierList)]
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, (sql.Identifier, sql.Function,
                                            sql.Case)),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Name,
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.match(T.Keyword, 'role'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: t.ttype == T.Name.Placeholder,
                   lambda t: t.ttype == T.Keyword,
                   lambda t: isinstance(t, sql.Comparison),
                   lambda t: isinstance(t, sql.Comment),
                   lambda t: t.ttype == T.Comment.Multiline,
                   ]
    tcomma = tlist.token_next_match(0, T.Punctuation, ',')
    start = None
    while tcomma is not None:
        # Go back one idx to make sure to find the correct tcomma
        idx = tlist.token_index(tcomma)
        before = tlist.token_prev(idx)
        after = tlist.token_next(idx)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(idx + 1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                # First comma of a run: the list starts at "before".
                start = before
            after_idx = tlist.token_index(after, start=idx)
            next_ = tlist.token_next(after_idx)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(sql.IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group) + 1,
                                                T.Punctuation, ',')
            else:
                tcomma = next_
def group_brackets(tlist):
    """Group parentheses () or square brackets []

    This is just like _group_matching, but complicated by the fact that
    round brackets can contain square bracket groups and vice versa
    """

    # When *tlist* is itself a bracket group, skip its own opening token.
    if isinstance(tlist, (sql.Parenthesis, sql.SquareBrackets)):
        idx = 1
    else:
        idx = 0

    # Find the first opening bracket
    token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])

    while token:
        start_val = token.value  # either '(' or '['
        if start_val == '(':
            end_val = ')'
            group_class = sql.Parenthesis
        else:
            end_val = ']'
            group_class = sql.SquareBrackets

        tidx = tlist.token_index(token)

        # Find the corresponding closing bracket
        end = _find_matching(tidx, tlist, T.Punctuation, start_val,
                             T.Punctuation, end_val)

        if end is None:
            # Unbalanced opener: leave it and continue scanning.
            idx = tidx + 1
        else:
            group = tlist.group_tokens(group_class,
                                       tlist.tokens_between(token, end))

            # Check for nested bracket groups within this group
            group_brackets(group)

            idx = tlist.token_index(group) + 1

        # Find the next opening bracket
        token = tlist.token_next_match(idx, T.Punctuation, ['(', '['])
def group_comments(tlist):
    """Merge consecutive comment tokens (and interleaved whitespace)
    into Comment groups."""
    [group_comments(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.Comment)]
    idx = 0
    token = tlist.token_next_by_type(idx, T.Comment)
    while token:
        tidx = tlist.token_index(token)
        # Find the first token after "token" that is neither a comment
        # nor whitespace; the group ends just before it.
        end = tlist.token_not_matching(tidx + 1,
                                       [lambda t: t.ttype in T.Comment,
                                        lambda t: t.is_whitespace()])
        if end is None:
            idx = tidx + 1
        else:
            eidx = tlist.token_index(end)
            grp_tokens = tlist.tokens_between(token,
                                              tlist.token_prev(eidx, False))
            group = tlist.group_tokens(sql.Comment, grp_tokens)
            idx = tlist.token_index(group)
        token = tlist.token_next_by_type(idx, T.Comment)
# bugfix Oracle10g merge and Oracle connect by
def group_where(tlist):
    """Group a WHERE clause and everything up to the next clause keyword
    into a Where group."""
    def end_match(token):
        # Keywords that terminate a WHERE clause.
        stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
                     'WHEN', # for Oracle10g merge
                     'CONNECT', # for Oracle connect by
                     )
        if token.match(T.Keyword, stopwords):
            return True
        if token.match(T.DML, ('DELETE')): # for Oracle10g merge
            return True
        if token.match(T.DML, ('START')): # for Oracle connect by
            return True
        return False
    [group_where(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.Where)]
    idx = 0
    token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
    while token:
        tidx = tlist.token_index(token)
        end = tlist.token_matching(tidx + 1, (end_match, ))
        if end is None:
            # No terminating keyword: the clause runs to the end.
            end = tlist._groupable_tokens[-1]
        else:
            # Stop just before the terminating keyword.
            end = tlist.tokens[tlist.token_index(end) - 1]
        group = tlist.group_tokens(sql.Where,
                                   tlist.tokens_between(token, end),
                                   ignore_ws=True)
        idx = tlist.token_index(group)
        token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
def group_aliased(tlist):
    """Attach a trailing alias token to the identifier it aliases.

    An Identifier/Function/Case followed directly by another such group is
    treated as "<expr> <alias>" and merged into the first token -- unless
    the second part is a VARCHAR type declaration.
    """
    clss = (sql.Identifier, sql.Function, sql.Case)
    [group_aliased(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, clss)]
    idx = 0
    token = tlist.token_next_by_instance(idx, clss)
    while token:
        next_ = tlist.token_next(tlist.token_index(token))
        if next_ is not None and isinstance(next_, clss):
            # Normalize to text first (Python 2 byte strings are decoded),
            # then compare case-insensitively.  Previously .upper() was only
            # applied on the Python 2 branch, so a lowercase "varchar"
            # declaration was wrongly treated as an alias under Python 3.
            text = next_.value
            if isinstance(text, bytes):
                text = text.decode('utf-8')
            if not text.upper().startswith('VARCHAR'):
                grp = tlist.tokens_between(token, next_)[1:]
                token.tokens.extend(grp)
                for t in grp:
                    tlist.tokens.remove(t)
        idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, clss)
def group_typecasts(tlist):
    # PostgreSQL-style "expr::type" casts.
    _group_left_right(tlist, T.Punctuation, '::', sql.Identifier)
def group_functions(tlist):
    """Group "<name>(<args>)" pairs into Function groups."""
    [group_functions(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, sql.Function)]
    idx = 0
    token = tlist.token_next_by_type(idx, T.Name)
    while token:
        next_ = tlist.token_next(token)
        if not isinstance(next_, sql.Parenthesis):
            # A name without a following parenthesis is not a call.
            idx = tlist.token_index(token) + 1
        else:
            func = tlist.group_tokens(sql.Function,
                                      tlist.tokens_between(token, next_))
            idx = tlist.token_index(func) + 1
        token = tlist.token_next_by_type(idx, T.Name)
def group_order(tlist):
    """Attach ASC/DESC ordering keywords to the preceding Identifier."""
    idx = 0
    token = tlist.token_next_by_type(idx, T.Keyword.Order)
    while token:
        prev = tlist.token_prev(token)
        if isinstance(prev, sql.Identifier):
            ido = tlist.group_tokens(sql.Identifier,
                                     tlist.tokens_between(prev, token))
            idx = tlist.token_index(ido) + 1
        else:
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_type(idx, T.Keyword.Order)
def align_comments(tlist):
    """Fold each Comment group into the token list directly before it,
    so comments stay attached to the construct they annotate."""
    [align_comments(sgroup) for sgroup in tlist.get_sublists()]
    idx = 0
    token = tlist.token_next_by_instance(idx, sql.Comment)
    while token:
        before = tlist.token_prev(tlist.token_index(token))
        if isinstance(before, sql.TokenList):
            # Move the comment (and tokens in between) into "before".
            grp = tlist.tokens_between(before, token)[1:]
            before.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
            idx = tlist.token_index(before) + 1
        else:
            idx = tlist.token_index(token) + 1
        token = tlist.token_next_by_instance(idx, sql.Comment)
def group(tlist):
    """Run all grouping passes over *tlist* in dependency order.

    The order is load-bearing: e.g. brackets must exist before functions,
    and CASE must be grouped before WHERE (see the bugfix note below).
    """
    for func in [
            group_comments,
            group_brackets,
            group_functions,
            # bugfix Oracle10g merge
            # group_where,
            # group_case,
            group_case,
            group_where,
            group_identifier,
            group_order,
            group_typecasts,
            group_as,
            group_aliased,
            group_assignment,
            group_comparison,
            align_comments,
            group_identifier_list,
            group_if,
            group_for,
            group_foreach,
            group_begin,
            ]:
        func(tlist)
| [
"[email protected]"
] | |
d6b4388855a0884a32f7d4a5d924a2e063dda428 | 03e4e75a00044df181adbebf5f44b5076d97a15f | /example/0_Basic_usage_of_the_library/python_motor/3_delete.py | 944d7f2e83b32bde74bae3324bcb045b7ab6c39c | [
"MIT"
] | permissive | RecluseXU/learning_spider | 3820b15654bb5824b1f92c53389d24799ff2bb88 | 43831e2fbbd5de0cf729ce8c12c84d043b56e855 | refs/heads/master | 2023-06-08T09:36:26.307395 | 2023-05-19T02:48:08 | 2023-05-19T02:48:08 | 234,718,806 | 64 | 14 | null | 2020-01-20T11:54:29 | 2020-01-18T10:38:06 | null | UTF-8 | Python | false | false | 820 | py | # -*- encoding: utf-8 -*-
'''
@Time : 2021-06-09
@Author : EvilRecluse
@Contact : https://github.com/RecluseXU
@Desc : 删除
'''
# here put the import lib
from motor.motor_asyncio import AsyncIOMotorClient
import asyncio
async def do_delete(client: AsyncIOMotorClient):
    """Delete a single document (the first one whose name is '百度')
    from the temp.temp collection.
    """
    collection = client['temp']['temp']
    result = await collection.delete_one({'name': '百度'})
    print(result)
async def do_delete_many(client: AsyncIOMotorClient):
    """Delete every document in the temp.temp collection
    (the empty filter matches all documents).
    """
    collection = client['temp']['temp']
    result = await collection.delete_many({})
    print(result)
# Connect to a local MongoDB instance and run both coroutines to completion.
client = AsyncIOMotorClient('mongodb://localhost:27017')
loop = asyncio.get_event_loop()
loop.run_until_complete(do_delete(client))
loop.run_until_complete(do_delete_many(client))
| [
"[email protected]"
] | |
ba5b46a31f4fb683772f0bbe00da6ac8986514ce | b03878679b1e07e0ec962083dd4d058d1503180f | /pyoxford/translator_api.py | 50ec01c31e66fc1aef9305b12d2977aaffa05fc1 | [
"MIT"
] | permissive | jhoelzl/pyoxford | d3a8abfb822706fbba17792e8519ac3d2d05e36d | 9e3f2e0130951e0ccb3c3f7fd8798219e1c36ee2 | refs/heads/master | 2020-12-28T12:07:27.963018 | 2015-11-28T14:33:44 | 2015-11-28T14:33:44 | 58,353,003 | 0 | 0 | null | 2016-05-09T06:17:24 | 2016-05-09T06:17:23 | null | UTF-8 | Python | false | false | 2,222 | py | import urllib.parse
from xml.etree import ElementTree
import requests
class Translator(object):
    """Client for the Microsoft (Azure Datamarket) Translator HTTP API.

    Authenticates with OAuth2 client credentials on construction, then
    exposes language detection and text translation.
    """

    AUTH_URL = "https://datamarket.accesscontrol.windows.net/v2/OAuth2-13"
    API_ROOT = "http://api.microsofttranslator.com/v2/Http.svc"
    # Fixed endpoint name: the service method is "Translate"; the previous
    # value ".../Translates" pointed at a nonexistent URL.
    TRANSLATE_URL = "http://api.microsofttranslator.com/v2/Http.svc/Translate"

    def __init__(self, client_id, client_secret):
        self.__token = ""
        self.authorize(client_id, client_secret)

    def authorize(self, client_id, client_secret):
        """Obtain an OAuth2 access token and store it for later requests.

        Raises requests.HTTPError when the token request fails.
        """
        headers = {
            "Content-type": "application/x-www-form-urlencoded"
        }
        params = urllib.parse.urlencode({
            "grant_type": "client_credentials",
            "client_id": client_id,
            "client_secret": client_secret,
            "scope": "http://api.microsofttranslator.com"
        })
        resp = requests.post(self.AUTH_URL, data=params, headers=headers)
        if resp.ok:
            _body = resp.json()
            self.__token = _body["access_token"]
        else:
            resp.raise_for_status()

    def detect(self, text):
        """Return the detected language code of *text* (e.g. "en")."""
        params = {
            "text": text
        }
        url = self.API_ROOT + "/Detect?" + urllib.parse.urlencode(params)
        resp = requests.get(url, headers=self.__make_header())
        result = {}
        if resp.ok:
            # The API answers with a single XML element whose text payload
            # is the language code.
            root = ElementTree.fromstring(resp.content)
            result = root.text
        else:
            resp.raise_for_status()
        return result

    def translate(self, text, lang_to, lang_from=""):
        """Translate *text* into *lang_to*; source language is auto-detected
        when *lang_from* is empty.

        Language codes: https://msdn.microsoft.com/en-us/library/hh456380.aspx
        """
        params = {
            "text": text,
            "to": lang_to
        }
        if lang_from:
            params["from"] = lang_from
        url = self.API_ROOT + "/Translate?" + urllib.parse.urlencode(params)
        resp = requests.get(url, headers=self.__make_header())
        result = {}
        if resp.ok:
            root = ElementTree.fromstring(resp.content)
            result = root.text
        else:
            resp.raise_for_status()
        return result

    def __make_header(self):
        # Bearer auth header using the token acquired in authorize().
        return {
            "Authorization": "Bearer {0}".format(self.__token)
        }
| [
"[email protected]"
] | |
2ae95574f310aa8df7b035537d7208bc72e1225f | 5e381364c2ab31ff3618369085afffba6caa8edb | /recipes/squirrel/all/test_package/conanfile.py | 5789855d52cb28e63a00eaa87670af8ce596836e | [
"MIT"
] | permissive | CAMOBAP/conan-center-index | 16aea68a6d22da22831ba985773125e8eda08f00 | 67d57532bdad549fef3fa6cb8fcdfa86bc55e4f1 | refs/heads/master | 2023-07-30T08:58:57.285571 | 2021-10-02T14:57:54 | 2021-10-02T14:57:54 | 323,262,699 | 1 | 0 | MIT | 2021-05-29T13:37:04 | 2020-12-21T07:30:02 | Python | UTF-8 | Python | false | false | 559 | py | from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package_multi"
def build(self):
cmake = CMake(self)
cmake.definitions["SQUIRREL_SHARED"] = self.options["squirrel"].shared
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self.settings):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
| [
"[email protected]"
] | |
23b3665560b2105af94061d1dfb27c99ee78e8e3 | 709c35fded3567512c1eec3bb7facb63e4891903 | /mark2cure/task/ner/models.py | 5c8d298d572b68cf492693848e88e40401c9c0fc | [
"MIT"
] | permissive | SuLab/mark2cure | da8ddca9d35cc10116417451d8a4caea29f279c5 | 63d20e75b8817ad75c6766b4d8a7a8ee8207d512 | refs/heads/master | 2023-02-04T08:51:22.372167 | 2018-02-28T02:50:25 | 2018-02-28T02:50:25 | 54,926,527 | 17 | 14 | null | 2023-01-11T22:31:16 | 2016-03-28T21:58:29 | Python | UTF-8 | Python | false | false | 790 | py | from django.db import models
from .managers import EntityRecognitionAnnotationManager
from django.forms.models import model_to_dict
class EntityRecognitionAnnotation(models.Model):
# Only access through Document.Annotation.metadata.RelationAnnotation
DISEASE = 0
GENE = 1
TREATMENT = 2
TYPE_CHOICES = (
(DISEASE, 'Disease'),
(GENE, 'Gene'),
(TREATMENT, 'Treatment')
)
type_idx = models.IntegerField(choices=TYPE_CHOICES, blank=True, null=True)
text = models.TextField(blank=True, null=True)
# (WARNING) Different than BioC
# This is always the start position relative
# to the section, not the entire document
start = models.IntegerField(blank=True, null=True)
objects = EntityRecognitionAnnotationManager()
| [
"[email protected]"
] | |
93751656fda3d971cc190873ece55248d3a8d757 | 58a87e847f8c6cd5b83cbe5758e779679563cc66 | /Exercícios complementares/ExercícioG.py | cceba6383e0aa814ccbfd132a91df968d153333a | [] | no_license | suzanamfp/Atividades-complementares-Python | 9444dec6cd952db3cdeaf26648f0eb60a89a0862 | d1a04f3f4d70298aa8448a37ba3e4b5313ced472 | refs/heads/master | 2022-12-02T21:23:55.261903 | 2020-08-18T19:28:34 | 2020-08-18T19:28:34 | 288,545,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def main():
cores = ['azul', 'vermelho', 'cinza', 'amarelo']
cores.remove('vermelho')
print(cores)
main() | [
"[email protected]"
] | |
b7559dc7812e5200464d1e7279d70aedf5f87fb3 | d73b14bd20cfc1320e1911247b28c5109c51b5d1 | /training/train_openpose.py | 75a4569e1b897416ebc493dee676ee8e17f683fe | [] | no_license | rainyucool/openpose-pytorch | 9c7e32bdab53417fef30d81a2dc53019dc3dbd84 | 4bc9cf4c927fdb507d89198724a237800cad9b3e | refs/heads/master | 2020-04-01T13:25:04.702843 | 2018-05-07T13:12:11 | 2018-05-07T13:12:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | import torch
from torch.utils.data import DataLoader
from config import cfg
from datasets.coco_dataset import CocoDataset
from models import model_openpose
from skeletons.gt_generators.gt_generator_openpose import GroundTruthGeneratorOpenPose
from skeletons.skeleton_config_openpose import SkeletonConfigOpenPose
from training.train_prod import train
from training.train_utils import get_losses, fix_layers_weights
network_model_handler = model_openpose.OpenPoseModelHandler()
network = network_model_handler.get_train_model()
network_model_handler.load_state_dict(network)
fix_layers_weights(network, "stage[2-6]_[1-9]_(joint|limb)_maps")
skeleton_config = SkeletonConfigOpenPose()
gt_generator = GroundTruthGeneratorOpenPose(network, skeleton_config)
train_dataset = CocoDataset([cfg.dataset.train_hdf5], skeleton_config, gt_generator,
network, augment=True)
sim_dataset = CocoDataset(["/media/USERNAME/Store1/sim_train_18_04_17_ITSC.h5"], skeleton_config, gt_generator,
network, augment=True)
train_sets = torch.utils.data.ConcatDataset([train_dataset, sim_dataset])
train_loader = DataLoader(train_sets, cfg.train.batch_size, shuffle=True)
train(network, train_loader, get_losses, fix_regex="stage[2-6]_[1-9]_(joint|limb)_maps")
| [
"[email protected]"
] | |
1a6e7eb51c70cb8fd43657c64d233264aef82988 | d70a4ec35ac91c914c42611e8b0ee05525371f7a | /src/lwc/settings_old.py | 392a0df76f497a6832088dac3f360cb7bffbefc8 | [] | no_license | jwilsontt/lwc | b662de74e47f350a732cc5e1e52a80bd4da46524 | 975a45aab16019f03880dafcd1b1ee7931613613 | refs/heads/master | 2021-01-21T07:53:44.348760 | 2015-08-21T04:05:36 | 2015-08-21T04:05:36 | 33,587,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,454 | py | """
Django settings for lwc project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd=643az*af2ts!1)stb+#5vuk1739ve&vsg&mh_j#qj&aaiz+2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'south',
'joins',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'lwc.middleware.ReferMiddleware',
)
ROOT_URLCONF = 'lwc.urls'
WSGI_APPLICATION = 'lwc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SHARE_URL = "http://127.0.0.1:8000/?ref="
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
#BASE_DIR + "/templates/",
#'/Users/jasonwilson/Documents/ProgWork/lwc/src/templates/',
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static', 'static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static', 'static_dirs'),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'static', 'media')
MEDIA_URL = '/media/'
| [
"[email protected]"
] | |
46ab94c8a8e2a2f0c3ac17d32ed98651ad8589fb | 0849923ebcde8f56a6e8550ae4f3c5ee3e2e0846 | /desktop/core/ext-py/MySQL-python-1.2.3c1/setup_posix.py | 5895984e6cb9da83ebb46ef3be36f4b55e2e0b0d | [
"GPL-2.0-only",
"GPL-1.0-or-later",
"Apache-2.0"
] | permissive | thinker0/hue | 511a5796cdfe45e0b27f1d3309557ca60ce8b13b | ee5aecc3db442e962584d3151c0f2eab397d6707 | refs/heads/master | 2022-07-10T02:37:23.591348 | 2014-03-27T20:05:00 | 2014-03-27T20:05:00 | 12,731,435 | 0 | 0 | Apache-2.0 | 2022-07-01T17:44:37 | 2013-09-10T14:13:42 | Python | UTF-8 | Python | false | false | 2,947 | py | from ConfigParser import SafeConfigParser
# This dequote() business is required for some older versions
# of mysql_config
def dequote(s):
if s[0] in "\"'" and s[0] == s[-1]:
s = s[1:-1]
return s
def compiler_flag(f):
return "-%s" % f
def mysql_config(what):
from os import popen
f = popen("%s --%s" % (mysql_config.path, what))
data = f.read().strip().split()
ret = f.close()
if ret:
if ret/256:
data = []
if ret/256 > 1:
raise EnvironmentError("%s not found" % (mysql_config.path,))
return data
mysql_config.path = "mysql_config"
def get_config():
import os, sys
from setup_common import get_metadata_and_options, enabled, create_release_file
metadata, options = get_metadata_and_options()
if 'mysql_config' in options:
mysql_config.path = options['mysql_config']
extra_objects = []
static = enabled(options, 'static')
if enabled(options, 'embedded'):
libs = mysql_config("libmysqld-libs")
client = "mysqld"
elif enabled(options, 'threadsafe'):
libs = mysql_config("libs_r")
client = "mysqlclient_r"
if not libs:
libs = mysql_config("libs")
client = "mysqlclient"
else:
libs = mysql_config("libs")
client = "mysqlclient"
library_dirs = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("L")) ]
libraries = [ dequote(i[2:]) for i in libs if i.startswith(compiler_flag("l")) ]
removable_compile_args = [ compiler_flag(f) for f in "ILl" ]
extra_compile_args = [ i.replace("%", "%%") for i in mysql_config("cflags")
if i[:2] not in removable_compile_args ]
include_dirs = [ dequote(i[2:])
for i in mysql_config('include')
if i.startswith(compiler_flag('I')) ]
if not include_dirs: # fix for MySQL-3.23
include_dirs = [ dequote(i[2:])
for i in mysql_config('cflags')
if i.startswith(compiler_flag('I')) ]
if static:
extra_objects.append(os.path.join(
library_dirs[0],'lib%s.a' % client))
name = "MySQL-python"
if enabled(options, 'embedded'):
name = name + "-embedded"
metadata['name'] = name
define_macros = [
('version_info', metadata['version_info']),
('__version__', metadata['version']),
]
create_release_file(metadata)
del metadata['version_info']
ext_options = dict(
name = "_mysql",
library_dirs = library_dirs,
libraries = libraries,
extra_compile_args = extra_compile_args,
include_dirs = include_dirs,
extra_objects = extra_objects,
define_macros = define_macros,
)
return metadata, ext_options
if __name__ == "__main__":
print """You shouldn't be running this directly; it is used by setup.py."""
| [
"[email protected]"
] | |
cc5fa549674d5f7f51c7555601bb75a5d2f426f5 | 9b0bdebe81e558d3851609687e4ccd70ad026c7f | /数据结构/链表/10.链表相交.py | f407d7d61aadf869f8133d2993fdec96d025f9a8 | [] | no_license | lizenghui1121/DS_algorithms | 645cdad007ccbbfa82cc5ca9e3fc7f543644ab21 | 9690efcfe70663670691de02962fb534161bfc8d | refs/heads/master | 2022-12-13T22:45:23.108838 | 2020-09-07T13:40:17 | 2020-09-07T13:40:17 | 275,062,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | """
@Author: Li Zenghui
@Date: 2020-07-17 14:47
"""
class ListNode:
def __init__(self, val):
self.val = val
self.next = None
def getIntersectionNode(headA, headB):
def getLength(head):
count = 0
while head:
count += 1
head = head.next
return count
l1 = getLength(headA)
l2 = getLength(headB)
if l1 > l2:
gap = l1 - l2
while gap > 0:
headA = headA.next
gap -= 1
if l2 > l1:
gap = l2 - l1
while gap > 0:
headB = headB.next
gap -= 1
while headA:
if headA == headB:
return headA
headB = headB.next
headA = headA.next
return None
if __name__ == '__main__':
n0 = ListNode(5)
n1 = ListNode(3)
n2 = ListNode(4)
n3 = ListNode(2)
m1 = ListNode(4)
n0.next = n1
n1.next = n2
n2.next = n3
m1.next = n2
getIntersectionNode(n0, m1) | [
"[email protected]"
] | |
d665df43152a171e231c0793371cf072aca4f3a1 | 31bd9276e74446a05cb41644dc8baf55d8662f93 | /xnr_0429/xnr/weibo_xnr_knowledge_base_management/utils.py | f3093366cccee8bd07fb22b3e81de8f8d384c4a2 | [] | no_license | SDsonghuiui/xnr2 | 4938a73adcafdc5881a81ffc9eaa93b08fb770e3 | 30d1d41566c58ee7925da6c2c6d741e229cda852 | refs/heads/master | 2020-04-06T08:25:42.176438 | 2018-11-12T14:10:32 | 2018-11-12T14:10:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,301 | py | #!/usr/bin/python
#-*- coding:utf-8 -*-
import time
import json
import pinyin
import numpy as np
from xnr.global_config import S_TYPE,S_DATE
from xnr.global_utils import es_xnr as es
from xnr.global_utils import es_user_portrait,es_user_profile,es_flow_text,flow_text_index_name_pre,flow_text_index_type
from xnr.global_utils import r,weibo_target_domain_detect_queue_name,es_user_portrait,portrait_index_name,portrait_index_type,weibo_date_remind_index_name,weibo_date_remind_index_type,\
weibo_sensitive_words_index_name,weibo_sensitive_words_index_type,\
weibo_hidden_expression_index_name,weibo_hidden_expression_index_type,\
weibo_xnr_corpus_index_name,weibo_xnr_corpus_index_type,\
weibo_domain_index_name,weibo_domain_index_type,weibo_role_index_name,\
weibo_role_index_type,weibo_example_model_index_name,\
weibo_example_model_index_type,profile_index_name,profile_index_type,\
opinion_corpus_index_name,opinion_corpus_index_type,\
all_opinion_corpus_index_name,all_opinion_corpus_index_type
from xnr.time_utils import ts2datetime,datetime2ts,get_flow_text_index_list
from xnr.parameter import MAX_VALUE,MAX_SEARCH_SIZE,domain_ch2en_dict,topic_en2ch_dict,domain_en2ch_dict,\
EXAMPLE_MODEL_PATH,TOP_ACTIVE_TIME,TOP_PSY_FEATURE
from xnr.utils import uid2nick_name_photo,judge_sensing_sensor,judge_follow_type,get_influence_relative
from textrank4zh import TextRank4Keyword, TextRank4Sentence
'''
领域知识库
'''
#use to merge dict
#input: dict1, dict2, dict3...
#output: merge dict
def union_dict(*objs):
_keys = set(sum([obj.keys() for obj in objs], []))
_total = {}
for _key in _keys:
_total[_key] = sum([int(obj.get(_key, 0)) for obj in objs])
return _total
def extract_keywords(w_text):
tr4w = TextRank4Keyword()
tr4w.analyze(text=w_text, lower=True, window=4)
k_dict = tr4w.get_keywords(5, word_min_len=2)
return k_dict
def get_generate_example_model(domain_name,role_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
role_en = domain_ch2en_dict[role_name]
task_id = domain_pinyin + '_' + role_en
es_result = es.get(index=weibo_role_index_name,doc_type=weibo_role_index_type,id=task_id)['_source']
item = es_result
#print 'es_result:::',es_result
# 政治倾向
political_side = json.loads(item['political_side'])[0][0]
if political_side == 'mid':
item['political_side'] = u'中立'
elif political_side == 'left':
item['political_side'] = u'左倾'
else:
item['political_side'] = u'右倾'
# 心理特征
psy_feature_list = []
psy_feature = json.loads(item['psy_feature'])
for i in range(TOP_PSY_FEATURE):
psy_feature_list.append(psy_feature[i][0])
item['psy_feature'] = '&'.join(psy_feature_list)
role_group_uids = json.loads(item['member_uids'])
mget_results = es_user_portrait.mget(index=portrait_index_name,doc_type=portrait_index_type,body={'ids':role_group_uids})['docs']
# topic_list = []
# for mget_item in mget_results:
# if mget_item['found']:
# keywords_list = json.loads(mget_item['_source']['keywords'])
# topic_list.extend(keywords_list)
# topic_keywords_dict = {}
# for topic_item in topic_list:
# keyword = topic_item[0]
# keyword_count = topic_item[1]
# try:
# topic_keywords_dict[keyword] += keyword_count
# except:
# topic_keywords_dict[keyword] = keyword_count
# monitor_keywords_list = []
# for i in range(3):
# keyword_max = max(topic_keywords_dict,key=topic_keywords_dict.get)
# monitor_keywords_list.append(keyword_max)
# del topic_keywords_dict[keyword_max]
# item['monitor_keywords'] = '&'.join(monitor_keywords_list)
if S_TYPE == 'test':
current_time = datetime2ts(S_DATE)
else:
current_time = int(time.time())
index_name_list = get_flow_text_index_list(current_time)
query_body_search = {
'query':{
'filtered':{
'filter':{
'terms':{'uid':role_group_uids}
}
}
},
'size':MAX_VALUE,
'_source':['keywords_string']
}
es_keyword_results = es_flow_text.search(index=index_name_list,doc_type=flow_text_index_type,\
body=query_body_search)['hits']['hits']
keywords_string = ''
for mget_item in es_keyword_results:
#print 'mget_item:::',mget_item
#if mget_item['found']:
keywords_string += '&'
keywords_string += mget_item['_source']['keywords_string']
k_dict = extract_keywords(keywords_string)
monitor_keywords_list = []
for item_item in k_dict:
monitor_keywords_list.append(item_item.word.encode('utf-8'))
item['monitor_keywords'] = ','.join(monitor_keywords_list)
mget_results_user = es_user_profile.mget(index=profile_index_name,doc_type=profile_index_type,body={'ids':role_group_uids})['docs']
item['nick_name'] = []
if mget_results_user:
for mget_item in mget_results_user:
#print 'mget_item:::',mget_item
if mget_item['found']:
item['nick_name'] = mget_item['_source']['nick_name']
item['location'] = mget_item['_source']['user_location']
item['gender'] = mget_item['_source']['sex']
uid = mget_item['_source']['uid']
try:
profile_results = es_user_portrait.get(index=profile_index_name,doc_type=profile_index_type,id=uid)['_source']
if profile_results['description']:
item['description'] = profile_results['description']
break
except:
pass
item['business_goal'] = u'渗透'
item['daily_interests'] = u'旅游'
# if S_TYPE == 'test':
# user_mget_results = es.mget(index=profile_index_name,doc_type=profile_index_type,body={'ids':role_group_uids})['docs']
# if user_mget_results
item['age'] = 30
item['career'] = u'自由职业'
active_time_list_np = np.array(json.loads(item['active_time']))
active_time_list_np_sort = np.argsort(-active_time_list_np)[:TOP_ACTIVE_TIME]
item['active_time'] = active_time_list_np_sort.tolist()
day_post_num_list = np.array(json.loads(item['day_post_num']))
item['day_post_num'] = np.mean(day_post_num_list).tolist()
item['role_name'] = role_name
task_id_new =domain_pinyin + '_' + role_en
example_model_file_name = EXAMPLE_MODEL_PATH + task_id_new + '.json'
try:
with open(example_model_file_name,"w") as dump_f:
json.dump(item,dump_f)
item_dict = dict()
#item_dict['xnr_user_no'] = xnr_user_no
item_dict['domain_name'] = domain_name
item_dict['role_name'] = role_name
es.index(index=weibo_example_model_index_name,doc_type=weibo_example_model_index_type,\
body=item_dict,id=task_id_new)
mark = True
except:
mark = False
return mark
def get_show_example_model():
#print '!!!!!',{'query':{'term':{'xnr_user_no':xnr_user_no}}}
es_results = es.search(index=weibo_example_model_index_name,doc_type=weibo_example_model_index_type,\
body={'query':{'match_all':{}}})['hits']['hits']
result_all = []
for result in es_results:
result = result['_source']
result_all.append(result)
return result_all
def get_export_example_model(domain_name,role_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
role_en = domain_ch2en_dict[role_name]
task_id = domain_pinyin + '_' + role_en
example_model_file_name = EXAMPLE_MODEL_PATH + task_id + '.json'
with open(example_model_file_name,"r") as dump_f:
es_result = json.load(dump_f)
return es_result
def get_create_type_content(create_type,keywords_string,seed_users,all_users):
create_type_new = {}
create_type_new['by_keywords'] = []
create_type_new['by_seed_users'] = []
create_type_new['by_all_users'] = []
if create_type == 'by_keywords':
if ',' in keywords_string:
create_type_new['by_keywords'] = keywords_string.encode('utf-8').split(',')
else:
create_type_new['by_keywords'] = keywords_string.encode('utf-8').split(',')
elif create_type == 'by_seed_users':
if ',' in seed_users:
create_type_new['by_seed_users'] = seed_users.encode('utf-8').split(',')
else:
create_type_new['by_seed_users'] = seed_users.encode('utf-8').split(',')
else:
if ',' in all_users:
create_type_new['all_users'] = all_users.encode('utf-8').split(',')
else:
create_type_new['all_users'] = all_users.encode('utf-8').split(',')
return create_type_new
def domain_update_task(domain_name,create_type,create_time,submitter,description,remark,compute_status=0):
task_id = pinyin.get(domain_name,format='strip',delimiter='_')
try:
domain_task_dict = dict()
#domain_task_dict['xnr_user_no'] = xnr_user_no
domain_task_dict['domain_pinyin'] = pinyin.get(domain_name,format='strip',delimiter='_')
domain_task_dict['domain_name'] = domain_name
domain_task_dict['create_type'] = json.dumps(create_type)
domain_task_dict['create_time'] = create_time
domain_task_dict['submitter'] = submitter
domain_task_dict['description'] = description
domain_task_dict['remark'] = remark
domain_task_dict['compute_status'] = compute_status
r.lpush(weibo_target_domain_detect_queue_name,json.dumps(domain_task_dict))
item_exist = dict()
#item_exist['xnr_user_no'] = domain_task_dict['xnr_user_no']
item_exist['domain_pinyin'] = domain_task_dict['domain_pinyin']
item_exist['domain_name'] = domain_task_dict['domain_name']
item_exist['create_type'] = domain_task_dict['create_type']
item_exist['create_time'] = domain_task_dict['create_time']
item_exist['submitter'] = domain_task_dict['submitter']
item_exist['description'] = domain_task_dict['description']
item_exist['remark'] = domain_task_dict['remark']
item_exist['group_size'] = ''
item_exist['compute_status'] = 0 # 存入创建信息
es.index(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,id=item_exist['domain_pinyin'],body=item_exist)
mark = True
except:
mark =False
return mark
def domain_create_task(domain_name,create_type,create_time,submitter,description,remark,compute_status=0):
task_id = pinyin.get(domain_name,format='strip',delimiter='_')
try:
es.get(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,id=task_id)['_source']
return 'domain name exists!'
except:
try:
domain_task_dict = dict()
#domain_task_dict['xnr_user_no'] = xnr_user_no
domain_task_dict['domain_pinyin'] = pinyin.get(domain_name,format='strip',delimiter='_')
domain_task_dict['domain_name'] = domain_name
domain_task_dict['create_type'] = json.dumps(create_type)
domain_task_dict['create_time'] = create_time
domain_task_dict['submitter'] = submitter
domain_task_dict['description'] = description
domain_task_dict['remark'] = remark
domain_task_dict['compute_status'] = compute_status
r.lpush(weibo_target_domain_detect_queue_name,json.dumps(domain_task_dict))
item_exist = dict()
#item_exist['xnr_user_no'] = domain_task_dict['xnr_user_no']
item_exist['domain_pinyin'] = domain_task_dict['domain_pinyin']
item_exist['domain_name'] = domain_task_dict['domain_name']
item_exist['create_type'] = domain_task_dict['create_type']
item_exist['create_time'] = domain_task_dict['create_time']
item_exist['submitter'] = domain_task_dict['submitter']
item_exist['description'] = domain_task_dict['description']
item_exist['remark'] = domain_task_dict['remark']
item_exist['group_size'] = ''
item_exist['compute_status'] = 0 # 存入创建信息
es.index(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,id=item_exist['domain_pinyin'],body=item_exist)
mark = True
except:
mark =False
return mark
def get_show_domain_group_summary(submitter):
es_result = es.search(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,\
body={'query':{'term':{'submitter':submitter}}})['hits']['hits']
if es_result:
result_all = []
for result in es_result:
item = {}
result = result['_source']
# print 'result::',result
# author xuan
if result['group_size'] == '' or result['group_size'] == 0:
item['group_size'] = 0
else:
item['group_size'] = result['group_size']
#item['group_size'] = result['group_size']
item['domain_name'] = result['domain_name']
item['create_time'] = result['create_time']
item['compute_status'] = result['compute_status']
item['create_type'] = result['create_type']
item['remark'] = result['remark']
item['description'] = result['description']
create_type = json.loads(result['create_type'].encode('utf-8'))
# if not create_type['by_keywords']:
# item['create_type'] = 'by_keywords'
# elif not create_type['by_seed_users']:
# item['create_type'] = 'by_seed_users'
# elif not create_type['by_all_users']:
# item['create_type'] = 'by_all_users'
result_all.append(item)
else:
return '当前账户尚未创建渗透领域'
return result_all
## 查看群体画像信息
def get_show_domain_group_detail_portrait(domain_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
es_result = es.get(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,\
id=domain_pinyin)['_source']
member_uids = es_result['member_uids']
es_mget_result = es_user_portrait.mget(index=portrait_index_name,doc_type=portrait_index_type,\
body={'ids':member_uids})['docs']
result_all = []
for result in es_mget_result:
item = {}
if result['found']:
result = result['_source']
item['uid'] = result['uid']
item['nick_name'] = result['uname']
item['photo_url'] = result['photo_url']
item['domain'] = result['domain']
item['sensitive'] = result['sensitive']
item['location'] = result['location']
item['fans_num'] = result['fansnum']
item['friends_num'] = result['friendsnum']
item['gender'] = result['gender']
item['home_page'] = 'http://weibo.com/'+result['uid']+'/profile?topnav=1&wvr=6&is_all=1'
# item['sensor_mark'] = judge_sensing_sensor(xnr_user_no,item['uid'])
# item['weibo_type'] = judge_follow_type(xnr_user_no,item['uid'])
item['influence'] = get_influence_relative(item['uid'],result['influence'])
else:
item['uid'] = result['_id']
item['nick_name'] = ''
item['photo_url'] = ''
item['domain'] = ''
item['sensitive'] = ''
item['location'] = ''
item['fans_num'] = ''
item['friends_num'] = ''
item['gender'] = ''
item['home_page'] = 'http://weibo.com/'+result['_id']+'/profile?topnav=1&wvr=6&is_all=1'
# item['sensor_mark'] = judge_sensing_sensor(xnr_user_no,result['_id'])
# item['weibo_type'] = judge_follow_type(xnr_user_no,result['_id'])
item['influence'] = ''
result_all.append(item)
return result_all
def get_show_domain_description(domain_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
es_result = es.get(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,\
id=domain_pinyin)['_source']
item = {}
item['group_size'] = es_result['group_size']
item['description'] = es_result['description']
topic_preference_list = json.loads(es_result['topic_preference'])
topic_preference_list_chinese = []
for topic_preference_item in topic_preference_list:
topic_preference_item_chinese = topic_en2ch_dict[topic_preference_item[0]]
topic_preference_list_chinese.append([topic_preference_item_chinese,topic_preference_item[1]])
item['topic_preference'] = topic_preference_list_chinese
item['word_preference'] = json.loads(es_result['top_keywords'])
role_distribute_list = json.loads(es_result['role_distribute'])
role_distribute_list_chinese = []
for role_distribute_item in role_distribute_list:
role_distribute_item_chinese = domain_en2ch_dict[role_distribute_item[0]]
role_distribute_list_chinese.append([role_distribute_item_chinese,role_distribute_item[1]])
item['role_distribute'] = role_distribute_list_chinese
political_side_list = json.loads(es_result['political_side'])
political_side_list_chinese = []
for political_side_item in political_side_list:
if political_side_item[0] == 'mid':
political_side_list_chinese.append([u'中立',political_side_item[1]])
elif political_side_item[0] == 'right':
political_side_list_chinese.append([u'右倾',political_side_item[1]])
else:
political_side_list_chinese.append([u'左倾',political_side_item[1]])
item['political_side'] = political_side_list_chinese
return item
def get_show_domain_role_info(domain_name,role_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
role_en = domain_ch2en_dict[role_name]
task_id = domain_pinyin + '_' + role_en
try:
es_result = es.get(index=weibo_role_index_name,doc_type=weibo_role_index_type,id=task_id)['_source']
except:
es_result = {}
return es_result
def get_delete_domain(domain_name):
domain_pinyin = pinyin.get(domain_name,format='strip',delimiter='_')
try:
es.delete(index=weibo_domain_index_name,doc_type=weibo_domain_index_type,id=domain_pinyin)
mark = True
except:
mark = False
return mark
###################################################################
################### Business Knowledge base ##################
###################################################################
###########functional module 1: sensitive words manage ###########
#step 1: create sensitive words
def get_create_sensitive_words(rank,sensitive_words,create_type,create_time,submitter):
task_detail = dict()
task_detail['rank'] = rank
task_detail['sensitive_words'] = sensitive_words
task_detail['create_type'] = create_type
task_detail['create_time'] = create_time
task_detail['submitter'] = submitter
task_id = sensitive_words
try:
es.index(index=weibo_sensitive_words_index_name,doc_type=weibo_sensitive_words_index_type,id=task_id,body=task_detail)
mark = True
except:
mark = False
return mark
#step 2: show the list of sensitive words
#step 2.1: show the list of sensitive words default
def show_sensitive_words_default():
query_body={
'query':{
'match_all':{}
},
'size':MAX_SEARCH_SIZE,
'sort':{'create_time':{'order':'desc'}}
}
result=es.search(index=weibo_sensitive_words_index_name,doc_type=weibo_sensitive_words_index_type,body=query_body)['hits']['hits']
results=[]
for item in result:
item['_source']['id']=item['_id']
results.append(item['_source'])
return results
#step 2.2: show the list of sensitive words according to the condition
def show_sensitive_words_condition(create_type,rank):
show_condition_list=[]
if create_type and rank:
show_condition_list.append({'term':{'create_type':create_type}})
show_condition_list.append({'term':{'rank':rank}})
elif create_type:
show_condition_list.append({'term':{'create_type':create_type}})
elif rank:
show_condition_list.append({'term':{'rank':rank}})
query_body={
'query':{
'filtered':{
'filter':{
'bool':{
'must':show_condition_list
}
}
}
},
'size':MAX_SEARCH_SIZE,
'sort':{'create_time':{'order':'desc'}}
}
#print query_
if create_type or rank:
results=es.search(index=weibo_sensitive_words_index_name,doc_type=weibo_sensitive_words_index_type,body=query_body)['hits']['hits']
result=[]
for item in results:
item['_source']['id']=item['_id']
result.append(item['_source'])
else:
result=show_sensitive_words_default()
return result
#step 3: delete the sensitive word
def delete_sensitive_words(words_id):
try:
es.delete(index=weibo_sensitive_words_index_name,doc_type=weibo_sensitive_words_index_type,id=words_id)
result=True
except:
result=False
return result
#step 4: change the sensitive word
#step 4.2: change the selected sensitive word
def change_sensitive_words(words_id,change_info):
rank=change_info[0]
sensitive_words=change_info[1]
create_type=change_info[2]
create_time=change_info[3]
submitter=change_info[4]
try:
es.update(index=weibo_sensitive_words_index_name,doc_type=weibo_sensitive_words_index_type,id=words_id,\
body={"doc":{'rank':rank,'sensitive_words':sensitive_words,'create_type':create_type,'create_time':create_time,'submitter':submitter}})
result=True
except:
result=False
return result
########### functional module 2: time alert node manage #########
#step 1: add time alert node
def get_create_date_remind(date_name,timestamp,keywords,create_type,create_time,content_recommend,submitter):
task_detail = dict()
#task_detail['date_time'] = ts2datetime(int(timestamp))[5:10]
task_detail['date_name']=date_name
task_detail['date_time']=timestamp[5:10]
task_detail['keywords'] = keywords
task_detail['create_type'] = create_type
task_detail['create_time'] = create_time
task_detail['content_recommend']=content_recommend
task_detail['submitter']=submitter
task_id = create_time
try:
es.index(index=weibo_date_remind_index_name,doc_type=weibo_date_remind_index_type,id=task_id,body=task_detail)
mark = True
except:
mark = False
return mark
#step 2: show the time alert node list
def show_date_remind():
query_body={
'query':{
'match_all':{}
},
'size':MAX_VALUE,
'sort':{'create_time':{'order':'desc'}}
}
result=es.search(index=weibo_date_remind_index_name,doc_type=weibo_date_remind_index_type,body=query_body)['hits']['hits']
results=[]
for item in result:
item['_source']['id']=item['_id']
results.append(item['_source'])
return results
def show_date_remind_condition(create_type):
query_body={
'query':{
'filtered':{
'filter':{
'term':{'create_type':create_type}
}
}
},
'size':MAX_VALUE,
'sort':{'create_time':{'order':'desc'}}
}
result=es.search(index=weibo_date_remind_index_name,doc_type=weibo_date_remind_index_type,body=query_body)['hits']['hits']
# print result
results=[]
for item in result:
item['_source']['id']=item['_id']
results.append(item['_source'])
return results
#step 3: change the time alert node
#explain: Carry out show_select_date_remind before change,carry out step 3.1 & 3.2
#step 3.1: show the selected time alert node
def show_select_date_remind(task_id):
    """Fetch one date-remind task by id; returns the raw ES GET response."""
    return es.get(index=weibo_date_remind_index_name,
                  doc_type=weibo_date_remind_index_type, id=task_id)
#step 3.2: change the selected time alert node
def change_date_remind(task_id,date_name,keywords,create_type,create_time):
    """Update a date-remind task in place, keeping its stored date_time,
    content_recommend and submitter; True on success."""
    stored = es.get(index=weibo_date_remind_index_name,
                    doc_type=weibo_date_remind_index_type,
                    id=task_id)['_source']
    doc = {
        'date_name': date_name,
        'date_time': stored['date_time'],
        'keywords': keywords,
        'create_type': create_type,
        'create_time': create_time,
        'content_recommend': stored['content_recommend'],
        'submitter': stored['submitter'],
    }
    try:
        es.update(index=weibo_date_remind_index_name,
                  doc_type=weibo_date_remind_index_type,
                  id=task_id, body={"doc": doc})
        return True
    except:
        return False
#step 4: delete the time alert node
def delete_date_remind(task_id):
    """Delete a date-remind task; True on success, False on any failure."""
    try:
        es.delete(index=weibo_date_remind_index_name,
                  doc_type=weibo_date_remind_index_type, id=task_id)
        return True
    except:
        return False
########### functional module 3: metaphorical expression ###########
#step 1: add metaphorical expression
def get_create_hidden_expression(origin_word,evolution_words_string,create_type,create_time,submitter):
    """Store a metaphorical-expression record keyed by its origin word."""
    doc = {
        'origin_word': origin_word,
        'evolution_words_string': evolution_words_string,
        'create_type': create_type,
        'create_time': create_time,
        'submitter': submitter,
    }
    try:
        es.index(index=weibo_hidden_expression_index_name,
                 doc_type=weibo_hidden_expression_index_type,
                 id=origin_word, body=doc)
        return True
    except:
        return False
#step 2: show the metaphorical expression list
def show_hidden_expression():
    """Return every metaphorical-expression record, newest first."""
    body = {
        'query': {'match_all': {}},
        'size': MAX_VALUE,
        'sort': {'create_time': {'order': 'desc'}},
    }
    hits = es.search(index=weibo_hidden_expression_index_name,
                     doc_type=weibo_hidden_expression_index_type,
                     body=body)['hits']['hits']
    records = []
    for hit in hits:
        record = hit['_source']
        record['id'] = hit['_id']
        records.append(record)
    return records
def show_hidden_expression_condition(create_type):
    """Return metaphorical expressions with the given create_type, newest first."""
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'term': {'create_type': create_type}
                }
            }
        },
        'size': MAX_VALUE,
        'sort': {'create_time': {'order': 'desc'}},
    }
    hits = es.search(index=weibo_hidden_expression_index_name,
                     doc_type=weibo_hidden_expression_index_type,
                     body=body)['hits']['hits']
    records = []
    for hit in hits:
        record = hit['_source']
        record['id'] = hit['_id']
        records.append(record)
    return records
#step 3: change the metaphorical expression
#step 3.1: show the selected hidden expression
def show_select_hidden_expression(express_id):
    """Fetch one metaphorical expression by id; returns the raw ES GET response."""
    return es.get(index=weibo_hidden_expression_index_name,
                  doc_type=weibo_hidden_expression_index_type, id=express_id)
#step 3.2: change the selected hidden expression
def change_hidden_expression(express_id,change_info):
    """Overwrite a metaphorical-expression record from a 5-item list:
    [origin_word, evolution_words_string, create_type, create_time, submitter]."""
    doc = {
        'origin_word': change_info[0],
        'evolution_words_string': change_info[1],
        'create_type': change_info[2],
        'create_time': change_info[3],
        'submitter': change_info[4],
    }
    try:
        es.update(index=weibo_hidden_expression_index_name,
                  doc_type=weibo_hidden_expression_index_type,
                  id=express_id, body={"doc": doc})
        return True
    except:
        return False
#step 4: delete the metaphorical expression
def delete_hidden_expression(express_id):
    """Delete a metaphorical-expression record; True on success."""
    try:
        es.delete(index=weibo_hidden_expression_index_name,
                  doc_type=weibo_hidden_expression_index_type, id=express_id)
        return True
    except:
        return False
###################################################################
################### weibo_corpus Knowledge base ##################
###################################################################
#step 1:create corpus
#corpus_info=[corpus_type,theme_daily_name,text,uid,mid,timestamp,retweeted,comment,like,create_type]
#subject corpus:corpus_type='主题语料'
#daily corpus:corpus_type='日常语料'
def create_corpus(corpus_info):
    """Index one corpus entry; True on success.

    corpus_info is a 10-item list:
    [corpus_type, theme_daily_name, text, uid, mid, timestamp,
     retweeted, comment, like, create_type].
    The weibo mid (corpus_info[4]) serves as the document id.
    """
    doc = {
        'corpus_type': corpus_info[0],
        'theme_daily_name': corpus_info[1],
        'text': corpus_info[2],
        'uid': corpus_info[3],
        'mid': corpus_info[4],
        'timestamp': corpus_info[5],
        'retweeted': corpus_info[6],
        'comment': corpus_info[7],
        'like': corpus_info[8],
        'create_type': corpus_info[9],
    }
    try:
        es.index(index=weibo_xnr_corpus_index_name,
                 doc_type=weibo_xnr_corpus_index_type,
                 id=corpus_info[4], body=doc)
        return True
    except:
        return False
#step 2: show corpus
def show_corpus(corpus_type):
    """Return every corpus entry of the given corpus_type, with ES ids."""
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'term': {'corpus_type': corpus_type}
                }
            }
        },
        'size': MAX_VALUE,
    }
    hits = es.search(index=weibo_xnr_corpus_index_name,
                     doc_type=weibo_xnr_corpus_index_type,
                     body=body)['hits']['hits']
    entries = []
    for hit in hits:
        entry = hit['_source']
        entry['id'] = hit['_id']
        entries.append(entry)
    return entries
def show_corpus_class(create_type,corpus_type):
    """Return corpus entries matching BOTH create_type and corpus_type.

    Bug fix: the old filter dict used the key 'term' twice, so Python kept
    only the last entry and the corpus_type condition was silently dropped.
    Both conditions now go into a bool/must list so they are AND-ed.
    """
    query_body = {
        'query': {
            'filtered': {
                'filter': {
                    'bool': {
                        'must': [
                            {'term': {'corpus_type': corpus_type}},
                            {'term': {'create_type': create_type}},
                        ]
                    }
                }
            }
        },
        'size': MAX_VALUE
    }
    result = es.search(index=weibo_xnr_corpus_index_name,
                       doc_type=weibo_xnr_corpus_index_type,
                       body=query_body)['hits']['hits']
    results = []
    for item in result:
        item['_source']['id'] = item['_id']
        results.append(item['_source'])
    return results
def show_condition_corpus(corpus_condition):
    """Return corpus entries matching every filter clause in corpus_condition
    (a list of ES filter dicts AND-ed together through bool/must)."""
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'bool': {
                        'must': corpus_condition
                    }
                }
            }
        },
        'size': MAX_VALUE,
    }
    hits = es.search(index=weibo_xnr_corpus_index_name,
                     doc_type=weibo_xnr_corpus_index_type,
                     body=body)['hits']['hits']
    entries = []
    for hit in hits:
        entry = hit['_source']
        entry['id'] = hit['_id']
        entries.append(entry)
    return entries
#观点语料
def get_opnion_corpus_type():
    """Return the names of all enabled (status == 1) opinion corpora.

    The function name keeps the historical 'opnion' spelling because
    external callers depend on it.  Any ES failure yields [].
    """
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'bool': {
                        'must': {'term': {'status': 1}}
                    }
                }
            }
        },
        'size': MAX_VALUE
    }
    try:
        names = []
        for hit in es.search(index=opinion_corpus_index_name,
                             doc_type=opinion_corpus_index_type,
                             body=body)['hits']['hits']:
            names.append(hit['_source']['corpus_name'])
    except:
        names = []
    return names
def get_label_name(corpus_name):
    """Map a list of opinion-corpus display names to their pinyin label codes.

    Any ES failure yields [].
    """
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'bool': {
                        'must': {'terms': {'corpus_name': corpus_name}}
                    }
                }
            }
        },
        'size': MAX_VALUE
    }
    try:
        labels = []
        for hit in es.search(index=opinion_corpus_index_name,
                             doc_type=opinion_corpus_index_type,
                             body=body)['hits']['hits']:
            labels.append(hit['_source']['corpus_pinyin'])
    except:
        labels = []
    return labels
def show_all_opinion_corpus():
    """Return up to 200 opinion-corpus entries, newest first.

    Each entry is the document source with its ES id injected under '_id'
    (note: not 'id' -- opinion corpora use a different key than the other
    show_* helpers).  Any ES failure yields [].
    """
    body = {
        'query': {'match_all': {}},
        'size': 200,
        'sort': {'timestamp': {'order': 'desc'}},
    }
    try:
        entries = []
        for hit in es.search(index=all_opinion_corpus_index_name,
                             doc_type=all_opinion_corpus_index_type,
                             body=body)['hits']['hits']:
            entry = hit['_source']
            entry['_id'] = hit['_id']
            entries.append(entry)
    except:
        entries = []
    return entries
def show_condition_opinion_corpus(theme_type):
    """Return up to 200 opinion-corpus entries whose label is in theme_type.

    Entries carry their ES id under '_id'; any ES failure yields [].
    """
    body = {
        'query': {
            'filtered': {
                'filter': {
                    'bool': {
                        'must': {'terms': {'label': theme_type}}
                    }
                }
            }
        },
        'size': 200
    }
    try:
        entries = []
        for hit in es.search(index=all_opinion_corpus_index_name,
                             doc_type=all_opinion_corpus_index_type,
                             body=body)['hits']['hits']:
            entry = hit['_source']
            entry['_id'] = hit['_id']
            entries.append(entry)
    except:
        entries = []
    return entries
def show_different_corpus(task_detail):
    """Assemble theme / daily / opinion corpora for the UI in one response.

    task_detail keys read here: 'corpus_status' (0 means no filtering at all),
    'request_type' ('all' bypasses per-theme filters), 'create_type', and the
    per-tab theme filters 'theme_type_1' (theme), 'theme_type_2' (daily),
    'theme_type_3' (opinion).  Returns a dict with keys
    'opinion_corpus_type', 'theme_corpus', 'daily_corpus', 'opinion_corpus'.
    """
    result = dict()
    theme_corpus = '主题语料'
    daily_corpus = '日常语料'
    # NOTE(review): opinion_corpus is assigned but never used below --
    # presumably kept for symmetry with the two corpus-type labels above.
    opinion_corpus = '观点语料'
    corpus_condition = []
    theme_corpus_condition = []
    daily_corpus_condition = []
    # The list of enabled opinion-corpus names is always returned.
    result['opinion_corpus_type'] = get_opnion_corpus_type()
    if task_detail['corpus_status'] == 0:
        # No filtering: dump everything from all three corpora.
        result['theme_corpus'] = show_corpus(theme_corpus)
        result['daily_corpus'] = show_corpus(daily_corpus)
        result['opinion_corpus'] = show_all_opinion_corpus()
    else:
        if task_detail['request_type'] == 'all':
            # 'all' request: filter only by create_type (when given).
            if task_detail['create_type']:
                result['theme_corpus'] = show_corpus_class(task_detail['create_type'],theme_corpus)
                result['daily_corpus'] = show_corpus_class(task_detail['create_type'],daily_corpus)
                result['opinion_corpus'] = show_all_opinion_corpus()
            else:
                # NOTE(review): no create_type leaves the three corpus keys
                # unset in this branch -- confirm callers tolerate that.
                pass
        else:
            # Per-theme filtered request: build ES filter clause lists.
            corpus_condition = []
            if task_detail['create_type']:
                corpus_condition.append({'term':{'create_type':task_detail['create_type']}})
            else:
                corpus_condition = []
            # print 'corpus::',corpus_condition
            # NOTE(review): this aliases (does not copy) corpus_condition, so
            # the appends below mutate both names.  corpus_condition is not
            # read again afterwards, so behavior is unaffected.
            theme_corpus_condition = corpus_condition
            if task_detail['theme_type_1']:
                theme_corpus_condition.append({'terms':{'theme_daily_name':task_detail['theme_type_1']}})
                theme_corpus_condition.append({'term':{'corpus_type':theme_corpus}})
                result['theme_corpus'] = show_condition_corpus(theme_corpus_condition)
            else:
                # No theme filter: fall back to create_type-only or full dump.
                if task_detail['create_type']:
                    result['theme_corpus'] = show_corpus_class(task_detail['create_type'],theme_corpus)
                else:
                    result['theme_corpus'] = show_corpus(theme_corpus)
            # print 'theme::',theme_corpus_condition
            # Daily corpus gets a fresh clause list (not aliased).
            daily_corpus_condition = []
            if task_detail['theme_type_2']:
                if task_detail['create_type']:
                    daily_corpus_condition.append({'term':{'create_type':task_detail['create_type']}})
                else:
                    pass
                daily_corpus_condition.append({'terms':{'theme_daily_name':task_detail['theme_type_2']}})
                daily_corpus_condition.append({'term':{'corpus_type':daily_corpus}})
                # print 'daily::',daily_corpus_condition
                result['daily_corpus'] = show_condition_corpus(daily_corpus_condition)
            else:
                if task_detail['create_type']:
                    result['daily_corpus'] = show_corpus_class(task_detail['create_type'],daily_corpus)
                else:
                    result['daily_corpus'] = show_corpus(daily_corpus)
            if task_detail['theme_type_3']:
                # label = get_label_name(task_detail['theme_type_3'])
                # result['opinion_corpus'] = show_condition_opinion_corpus(label)
                result['opinion_corpus'] = show_condition_opinion_corpus(task_detail['theme_type_3'])
            else:
                result['opinion_corpus'] = show_all_opinion_corpus()
    return result
#step 3: change the corpus
#explain:carry out show_select_corpus before change,carry out step 3.1 & 3.2
#step 3.1: show the selected corpus
def show_select_corpus(corpus_id):
    """Fetch one corpus entry by id.

    Returns a one-element list containing the entry's source dict with its
    ES id injected under 'id', matching the shape of the other show_*
    corpus helpers.

    Bug fix: the old code iterated the es.get() response dict itself, which
    yields its string keys ('_index', '_source', ...) and then crashed on
    item['_source'] (string indices must be integers).
    """
    result = es.get(index=weibo_xnr_corpus_index_name,
                    doc_type=weibo_xnr_corpus_index_type, id=corpus_id)
    source = result['_source']
    source['id'] = result['_id']
    return [source]
#step 3.2: change the selected corpus
def change_select_corpus(corpus_id,corpus_type,theme_daily_name,create_type):
    """Re-classify a corpus entry, keeping its weibo payload fields intact."""
    stored = es.get(index=weibo_xnr_corpus_index_name,
                    doc_type=weibo_xnr_corpus_index_type,
                    id=corpus_id)['_source']
    doc = {
        'corpus_type': corpus_type,
        'theme_daily_name': theme_daily_name,
        'text': stored['text'],
        'uid': stored['uid'],
        'mid': stored['mid'],
        'timestamp': stored['timestamp'],
        'retweeted': stored['retweeted'],
        'comment': stored['comment'],
        'like': stored['like'],
        'create_type': create_type,
    }
    try:
        es.update(index=weibo_xnr_corpus_index_name,
                  doc_type=weibo_xnr_corpus_index_type,
                  id=corpus_id, body={"doc": doc})
        return True
    except:
        return False
#step 4: delete the corpus
def delete_corpus(corpus_id):
    """Delete one corpus entry; True on success, False on any failure."""
    try:
        es.delete(index=weibo_xnr_corpus_index_name,
                  doc_type=weibo_xnr_corpus_index_type, id=corpus_id)
        return True
    except:
        return False
def delete_opinion_corpus(corpus_id):
    """Delete one opinion-corpus entry; True on success, False on failure."""
    try:
        es.delete(index=all_opinion_corpus_index_name,
                  doc_type=all_opinion_corpus_index_type, id=corpus_id)
        return True
    except:
        return False
def text_list(text_content):
    """Collect the 'text' field of every entry; [] for an empty/None input."""
    if not text_content:
        return []
    return [entry['text'] for entry in text_content]
def show_opinion_corpus():
    """Map each enabled opinion-corpus name to the list of its entry texts."""
    corpus_dict = dict()
    for corpus_name in get_opnion_corpus_type():
        entries = show_condition_opinion_corpus(corpus_name)
        corpus_dict[corpus_name] = text_list(entries)
    return corpus_dict
| [
"[email protected]"
] | |
175dac1813e30dfc38d2330b3134a90625c9dacc | 3d192f5ebe208a9603460d7bc248a5e983bd49e1 | /main_list.py | dc59cc6d0f399323fb087d8c5a02697e1a5038f5 | [] | no_license | 535521469/list_shc | 43e83705127b601fdfa3f1688f450d9ddae96bb9 | 0d93baec3a64a532ab95498805247784262dd684 | refs/heads/master | 2021-01-15T21:10:03.980779 | 2013-11-28T05:44:17 | 2013-11-28T05:44:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,202 | py | # encoding=utf8
'''
Created on 2013-3-20
@author: corleone
'''
from crawler.shc.fe.const import FEConstant as const
from multiprocessing import Process
from sched import scheduler
from scrapy.cmdline import execute
from scrapy.settings import CrawlerSettings
import collections
import datetime
import time
from bot.config import configdata
from const import ListSpiderConst, ScrapyConst, AppConst
import os
class SpiderProcess(Process):
    # One crawl of a single city, run in its own OS process so scrapy's
    # reactor/engine state never leaks between cities.
    def __init__(self, city_name, configdata):
        Process.__init__(self)
        self.city_name = city_name
        # Shallow-copy the shared config and stamp the city this process owns.
        self.configdata = dict(configdata)
        self.configdata[const.CURRENT_CITY] = city_name
    def run(self):
        # Resolve the page range for this city, falling back to the defaults
        # when the city has no (or an unparsable) per-city config entry.
        feconfig = self.configdata[const.FE_CONFIG]
        try:
            #=======================================================================
            # if the city use the default config
            #=======================================================================
            # NOTE(review): the per-city config value is eval()'d from the
            # config file -- it must be a trusted, dict-literal string.
            city_config = eval(feconfig[self.city_name])
        except Exception:
            city_config = {}
        start_page = city_config.get(const.START_PAGE,
                                     feconfig[const.DEFAULT_START_PAGE])
        end_page = city_config.get(const.END_PAGE,
                                   feconfig[const.DEFAULT_END_PAGE])
#        values = {
#                  const.CONFIG_DATA:self.configdata,
#                  const.START_PAGE:int(start_page),
#                  const.END_PAGE:int(end_page),
#                  }
#        settings = u'crawler.shc.fe.settings'
#        module_import = __import__(settings, {}, {}, [''])
#        settings = CrawlerSettings(module_import, values=values)
#        execute(argv=["scrapy", "crawl", 'SHCSpider' ], settings=settings)
        # NOTE(review): this reads the MODULE-level configdata (not
        # self.configdata) and mutates the returned dict in place -- harmless
        # here because each SpiderProcess runs in its own OS process.
        values = configdata.get(ListSpiderConst.ListSettings, {})
        values.update(**{
                       const.CONFIG_DATA:self.configdata,
                       const.START_PAGE:int(start_page),
                       const.END_PAGE:int(end_page),
                       })
        # Console == '1' means log to stdout; otherwise route the log file
        # into the configured log directory (default: current directory).
        if ScrapyConst.Console in values:
            if values[ScrapyConst.Console] == u'1':# out to console
                values[ScrapyConst.LOG_FILE] = None
        else:
            log_dir = values.get(ScrapyConst.LOG_DIR, os.getcwd())
            if ScrapyConst.LOG_FILE in values:
                log_file = values[ScrapyConst.LOG_FILE]
                values[ScrapyConst.LOG_FILE] = os.sep.join([log_dir , log_file])
        # Build scrapy settings from the project settings module plus the
        # per-run overrides, then block in this process running the spider.
        settings_path = u'crawler.shc.fe.settings'
        module_import = __import__(settings_path, {}, {}, [''])
        settings = CrawlerSettings(module_import, values=values)
        execute(argv=["scrapy", "crawl", 'SHCSpider' ], settings=settings)
spider_process_mapping = {}
def add_task(root_scheduler):
    """Create one SpiderProcess per configured city and hand the queue of
    not-yet-started processes to check_add_process for throttled start-up."""
    city_names = configdata[const.FE_CONFIG][const.FE_CONFIG_CITIES].split(u',')
    pending = collections.deque()
    for city in city_names:
        proc = SpiderProcess(city, configdata)
        spider_process_mapping[city] = proc
        pending.append(proc)
    if len(pending):
        # Kick off the polling loop that feeds processes into the pool.
        root_scheduler.enter(1, 1, check_add_process,
                             (spider_process_mapping, pending,
                              root_scheduler, configdata))
def check_add_process(spider_process_mapping, processes,
                      root_scheduler, configdata):
    # Poll loop: start queued SpiderProcesses while the number of live ones
    # stays under the configured pool size, re-scheduling itself until the
    # queue is drained AND every started process has finished.
    # NOTE(review): len(filter(...)) only works on Python 2, where filter
    # returns a list -- this module is Python 2 code (see the print
    # statements below).
    alives = filter(Process.is_alive, spider_process_mapping.values())
    if len(processes):
        pool_size = int(configdata[const.FE_CONFIG].get(const.MULTI, 1))
        if len(alives) < pool_size:
            # Room in the pool: start the next queued city immediately.
            p = processes.popleft()
            print (u'%s enqueue %s ,pool size %d , %d cities '
                   'waiting ') % (datetime.datetime.now(), p.city_name,
                   pool_size, len(processes))
            root_scheduler.enter(0, 1, p.start, ())
        #=======================================================================
        # check to add process 10 seconds later
        #=======================================================================
        if not len(processes):
            print (u'%s all process enqueue ...' % datetime.datetime.now())
        # Re-check in 5 seconds while the queue is non-empty (the comment
        # above says 10, but the delay actually used is 5).
        root_scheduler.enter(5, 1, check_add_process
                             , (spider_process_mapping, processes,
                             root_scheduler, configdata))
    else:
        if len(alives) == 0:
            # Queue drained and nothing alive: this crawl round is done.
            print ('%s crawl finished ... ' % datetime.datetime.now())
        else :
            # Queue drained but crawls still running: keep polling.
            root_scheduler.enter(5, 1, check_add_process
                                 , (spider_process_mapping, processes,
                                 root_scheduler, configdata))
if __name__ == '__main__':
    # Run a full crawl round (one scheduler run drains all cities), then
    # sleep `frequence` seconds (default 1800) and start over, forever.
    frequence = configdata[AppConst.app_config].get(AppConst.app_config_frequence, 1800)
    frequence = int(frequence)
    while 1:
        root_scheduler = scheduler(time.time, time.sleep)
        root_scheduler.enter(0, 0, add_task, (root_scheduler,))
        root_scheduler.run()
        print u'%s sleep %s seconds' % (datetime.datetime.now(), frequence)
        time.sleep(frequence)
| [
"[email protected]"
] | |
8876284a7a38ed4b8daedc0a42e8722bf52cf232 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/AbnormalRecovery-bak/bak/XOGW_RESTART_PGJK_IPO.py | 4445242cb1451a5c0c23ad77987bf2e8101fe124 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,296 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from Autocase_Result.AbnormalRecovery.ARservice.ARmainservice import *
from utils.env_restart import *
from service.ServiceConfig import *
from xtp.api.xtp_test_case import *
from xtp.api.config import ALL_USER
# Before the case runs: clear all data and restart every service.
clear_data_and_restart_all()
class tradeApi(object):
    # Shared XTP handles for the whole test: one constants table and one
    # trade-API session created for the first configured user.
    const = XTPConst()
    trade = XTPTradeApi(ALL_USER[0])
class XOGW_RESTART_PGJK_IPO(unittest.TestCase):
    # Abnormal-recovery case: place rights-issue payment and IPO subscription
    # orders for every user, restart both order gateways, and verify each
    # user's order / capital / position info matches before and after.
    def test_XOGW_RESTART_PGJK_IPO(self):
        title = '异常恢复:重启报盘-配股缴款、新股申购'
        logger.warning(title)
        for user in ALL_USER:
            # Log the current user in.
            tradeApi.trade.Login(user)
            wt_reqs = {
                'business_type': tradeApi.const.XTP_BUSINESS_TYPE[
                    'XTP_BUSINESS_TYPE_ALLOTMENT'],
                'order_client_id': 1,
                'market': tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': '080001',
                'side': tradeApi.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': tradeApi.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': 0,
                'quantity': 1000
            }
            # Place rights-issue payment orders; in Shenzhen such orders can
            # only end up in the "unfilled" or "cancelled" state.
            service_insertorder(tradeApi, wt_reqs, user)
            service_insertorder(tradeApi, wt_reqs, user)
            service_cancleorder(tradeApi, wt_reqs, user)
            # Shanghai rights-issue payment orders are only ever "unfilled"
            # or "fully filled" and cannot be cancelled.
            wt_reqs['ticker'] = '700001'
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SH_A']
            for client_id in range(1,3):
                wt_reqs['order_client_id'] = client_id
                service_insertorder(tradeApi, wt_reqs, user)
            # Place IPO subscription orders; these only ever stay "unfilled".
            wt_reqs['business_type'] = tradeApi.const.XTP_BUSINESS_TYPE[
                'XTP_BUSINESS_TYPE_IPOS']
            wt_reqs['order_client_id'] = 1
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A']
            wt_reqs['ticker'] = '002846'
            service_insertorder(tradeApi, wt_reqs, user)
            wt_reqs['market'] = tradeApi.const.XTP_MARKET_TYPE['XTP_MKT_SH_A']
            wt_reqs['ticker'] = '732818'
            service_insertorder(tradeApi, wt_reqs, user)
            # Query the current user's capital and positions (pre-restart).
            query_capital_stock(tradeApi,order_info ,user,wt_reqs['ticker'])
            # Log the current user out.
            tradeApi.trade.Logout()
        # Restart the Shanghai and Shenzhen order-gateway environments.
        xogwsh_restart()
        xogwsz_restart()
        time.sleep(3)
        for user in ALL_USER:
            # Log back in after the restart and receive the order info pushed
            # by the OMS.
            service_restart(tradeApi,user)
            # Query the current user's capital and positions (post-restart).
            query_capital_stock(tradeApi,restart_info ,user,wt_reqs['ticker'])
        # Verify every user's order info matches before and after the restart.
        result = check_result(order_info, restart_info)
        self.assertEqual(result['结果'], True)
# Allow running this case directly as a script.
if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
a2c06d42d13a74881d8ce2770a42a240f90ffa8b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/nxr.py | 78b3d4076a27f4f810c52838a61eaa9ef5ab7d72 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the tokens enclosed by a pair of bare '"' tokens (Python 2)."""
    # The token list must start and end with a lone double-quote token.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Drop the quote tokens and print the payload space-separated.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Only the two quote tokens: print an empty line.
            print
def main(fileName):
    """Interpret fileName line by line: 'nxR "..."' prints the quoted
    payload; any other first token prints ERROR and stops (Python 2)."""
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'nxR':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # The script to interpret is passed as the first CLI argument.
    main(sys.argv[1])
"[email protected]"
] | |
315aad6b034c11627f71ff2689bea84cf59bba2b | 4a48593a04284ef997f377abee8db61d6332c322 | /python/pyqt/pyqt5/widget_QTableWidget.py | ac6b9f412cde6986dc866f49d1e4a4e191922386 | [
"MIT"
] | permissive | jeremiedecock/snippets | 8feaed5a8d873d67932ef798e16cb6d2c47609f0 | b90a444041c42d176d096fed14852d20d19adaa7 | refs/heads/master | 2023-08-31T04:28:09.302968 | 2023-08-21T07:22:38 | 2023-08-21T07:22:38 | 36,926,494 | 26 | 9 | MIT | 2023-06-06T02:17:44 | 2015-06-05T10:19:09 | Python | UTF-8 | Python | false | false | 2,001 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See https://pythonspot.com/en/pyqt5-table/
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QTableWidget, QTableWidgetItem, QVBoxLayout
from PyQt5.QtCore import pyqtSlot
class MyTableWidget(QWidget):
    """A 4x2 QTableWidget demo that prints the selected cells on double-click."""

    def __init__(self):
        super().__init__()

        # Build the 4x2 table.
        self.tableWidget = QTableWidget()
        self.tableWidget.setRowCount(4)
        self.tableWidget.setColumnCount(2)

        # Fill every cell with its label (text is 1-based row/column).
        cells = (
            (0, 0, "Cell (1,1)"), (0, 1, "Cell (1,2)"),
            (1, 0, "Cell (2,1)"), (1, 1, "Cell (2,2)"),
            (2, 0, "Cell (3,1)"), (2, 1, "Cell (3,2)"),
            (3, 0, "Cell (4,1)"), (3, 1, "Cell (4,2)"),
        )
        for row, col, label in cells:
            self.tableWidget.setItem(row, col, QTableWidgetItem(label))

        # Report the selection whenever a cell is double-clicked.
        self.tableWidget.doubleClicked.connect(self.on_click)

        # The table is the widget's only child.
        layout = QVBoxLayout()
        layout.addWidget(self.tableWidget)
        self.setLayout(layout)

    @pyqtSlot()
    def on_click(self):
        """Print row, column and text of every currently selected item."""
        for item in self.tableWidget.selectedItems():
            print(item.row(), item.column(), item.text())
# Standard Qt bootstrap: build the app, show the demo table, run the loop.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    widget = MyTableWidget()
    widget.show()
    # The mainloop of the application. The event handling starts from this point.
    # The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
    exit_code = app.exec_()
    # The sys.exit() method ensures a clean exit.
    # The environment will be informed, how the application ended.
    sys.exit(exit_code)
| [
"[email protected]"
] | |
88d4fa2d0076946d04bbd1b4a6f4885cde020362 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Gdk/EventTouchpadSwipe.py | d2868a48bee0f1424400e2bd2a7e6dbbb90e218c | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 5,968 | py | # encoding: utf-8
# module gi.repository.Gdk
# from /usr/lib64/girepository-1.0/Gdk-2.0.typelib
# by generator 1.147
# no doc
# imports
import gi as __gi
import gi.overrides as __gi_overrides
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gobject as __gobject
# Auto-generated PyGObject stub (generator 1.147): every method body is a
# placeholder and the "real signature unknown" notes come from the generator.
class EventTouchpadSwipe(__gi.Struct):
    """
    Stub for GdkEventTouchpadSwipe: field accessors for a touchpad
    swipe-gesture event.

    :Constructors:
    ::
        EventTouchpadSwipe()
    """
    def __delattr__(self, *args, **kwargs): # real signature unknown
        """ Implement delattr(self, name). """
        pass
    def __dir__(self, *args, **kwargs): # real signature unknown
        """ Default dir() implementation. """
        pass
    def __eq__(self, *args, **kwargs): # real signature unknown
        """ Return self==value. """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ Default object formatter. """
        pass
    def __getattribute__(self, *args, **kwargs): # real signature unknown
        """ Return getattr(self, name). """
        pass
    def __ge__(self, *args, **kwargs): # real signature unknown
        """ Return self>=value. """
        pass
    def __gt__(self, *args, **kwargs): # real signature unknown
        """ Return self>value. """
        pass
    def __hash__(self, *args, **kwargs): # real signature unknown
        """ Return hash(self). """
        pass
    def __init_subclass__(self, *args, **kwargs): # real signature unknown
        """
        This method is called when a class is subclassed.
        The default implementation does nothing. It may be
        overridden to extend subclasses.
        """
        pass
    def __init__(self): # real signature unknown; restored from __doc__
        pass
    def __le__(self, *args, **kwargs): # real signature unknown
        """ Return self<=value. """
        pass
    def __lt__(self, *args, **kwargs): # real signature unknown
        """ Return self<value. """
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    def __ne__(self, *args, **kwargs): # real signature unknown
        """ Return self!=value. """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ Helper for pickle. """
        pass
    def __repr__(self, *args, **kwargs): # real signature unknown
        """ Return repr(self). """
        pass
    def __setattr__(self, *args, **kwargs): # real signature unknown
        """ Implement setattr(self, name, value). """
        pass
    def __sizeof__(self, *args, **kwargs): # real signature unknown
        """ Size of object in memory, in bytes. """
        pass
    def __str__(self, *args, **kwargs): # real signature unknown
        """ Return str(self). """
        pass
    def __subclasshook__(self, *args, **kwargs): # real signature unknown
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented.  If it returns
        NotImplemented, the normal algorithm is used.  Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    def __weakref__(self, *args, **kwargs): # real signature unknown
        pass
    # Event fields exposed as generated placeholder properties; the real
    # storage lives in the underlying C struct.
    dx = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    dy = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    n_fingers = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    phase = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    send_event = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    state = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    time = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    type = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    window = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    x = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    x_root = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    y = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    y_root = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    __class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
    __dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(EventTouchpadSwipe), '__module__': 'gi.repository.Gdk', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'EventTouchpadSwipe' objects>, '__weakref__': <attribute '__weakref__' of 'EventTouchpadSwipe' objects>, '__doc__': None, 'type': <property object at 0x7fbaf81637c0>, 'window': <property object at 0x7fbaf81638b0>, 'send_event': <property object at 0x7fbaf81639a0>, 'phase': <property object at 0x7fbaf8163a90>, 'n_fingers': <property object at 0x7fbaf8163b80>, 'time': <property object at 0x7fbaf8163c70>, 'x': <property object at 0x7fbaf8163d60>, 'y': <property object at 0x7fbaf8163e50>, 'dx': <property object at 0x7fbaf8163f40>, 'dy': <property object at 0x7fbaf8164090>, 'x_root': <property object at 0x7fbaf8164180>, 'y_root': <property object at 0x7fbaf8164270>, 'state': <property object at 0x7fbaf8164360>})"
    __gtype__ = None # (!) real value is '<GType void (4)>'
    __info__ = StructInfo(EventTouchpadSwipe)
| [
"[email protected]"
] | |
9e90bb8d779df640147384a893d77b07f2666499 | e44d00ffcea03f8656c40b3d4d993d51a38af3b0 | /leetcode/June/J30_WordSearch.py | b5d00f420d9170ce3aba83d26afc9c78c2c1d6ed | [] | no_license | Ayushmanglani/competitive_coding | d6beec4f2b24aef34ea44c3a4a72074985b4a766 | 12325b09ae2bc6b169578b6a0a091069e14c9227 | refs/heads/master | 2023-06-12T04:43:41.130774 | 2021-07-03T13:01:37 | 2021-07-03T13:01:37 | 262,079,363 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | WORD_KEY = '$'
trie = {}
for word in words:
node = trie
for letter in word:
# retrieve the next node; If not found, create a empty node.
node = node.setdefault(letter, {})
# mark the existence of a word in trie node
node[WORD_KEY] = word
rowNum = len(board)
colNum = len(board[0])
matchedWords = []
def backtracking(row, col, parent):
letter = board[row][col]
currNode = parent[letter]
# check if we find a match of word
word_match = currNode.pop(WORD_KEY, False)
if word_match:
# also we removed the matched word to avoid duplicates,
# as well as avoiding using set() for results.
matchedWords.append(word_match)
# Before the EXPLORATION, mark the cell as visited
board[row][col] = '#'
# Explore the neighbors in 4 directions, i.e. up, right, down, left
for (rowOffset, colOffset) in [(-1, 0), (0, 1), (1, 0), (0, -1)]:
newRow, newCol = row + rowOffset, col + colOffset
if newRow < 0 or newRow >= rowNum or newCol < 0 or newCol >= colNum:
continue
if not board[newRow][newCol] in currNode:
continue
backtracking(newRow, newCol, currNode)
# End of EXPLORATION, we restore the cell
board[row][col] = letter
# Optimization: incrementally remove the matched leaf node in Trie.
if not currNode:
parent.pop(letter)
for row in range(rowNum):
for col in range(colNum):
# starting from each of the cells
if board[row][col] in trie:
backtracking(row, col, trie)
return matchedWords | [
"[email protected]"
] | |
bcf362efa27bf9b944a7809c71d7d948778c7f5b | 6351221d588668804e2df01936732eede4d96ed0 | /leetcode-cn/Python/75.颜色分类.py | 4313aa9c973f61f6ebfa98ea1f9aed7a874433b6 | [] | no_license | LogicJake/code-for-interview | 8e4ec9e24ec661a443ad42aa2496d78a1fbc8a3f | 5990b09866696c2f3e845047c755fa72553dd421 | refs/heads/master | 2021-09-20T20:19:17.118333 | 2021-09-14T13:46:30 | 2021-09-14T13:46:30 | 102,202,212 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | #
# @lc app=leetcode.cn id=75 lang=python3
#
# [75] 颜色分类
#
# @lc code=start
from typing import List
class Solution:
    def sortColors(self, nums: List[int]) -> None:
        """Sort 0/1/2 values in place (Dutch national flag, single pass).

        Do not return anything, modify nums in-place instead.
        """
        left = 0                # next slot for a 0
        right = len(nums) - 1   # next slot for a 2
        cur = 0
        while cur <= right:
            # Push every 2 at `cur` to the tail; the swapped-in value has
            # not been examined yet, so re-test it before advancing.
            while cur <= right and nums[cur] == 2:
                nums[cur], nums[right] = nums[right], nums[cur]
                right -= 1
            # A 0 goes to the head; `left` never passes `cur`, so the value
            # swapped back is always a 1 (or `cur` itself) and is safe to skip.
            if nums[cur] == 0:
                nums[left], nums[cur] = nums[cur], nums[left]
                left += 1
            cur += 1
# @lc code=end
| [
"[email protected]"
] | |
39cf2d0c2245eb8d9b2517f31f7b202604cb3c5d | 7f6ad639d41ad522ae73cb87ee61da48d83dcd27 | /hamnadmin/hamnadmin/mailqueue/management/commands/send_queued_mail.py | 301c769bebde70fefdcdf2b48a8d818b20aa583a | [] | no_license | mhagander/hamn | 0aedaea24c32903480b580273ce272e26cc25d5b | c7271662c7726749d11e47f3064bec80b0e95c4a | refs/heads/master | 2023-08-31T05:05:07.160357 | 2023-08-24T09:02:52 | 2023-08-24T09:02:52 | 729,253 | 2 | 2 | null | 2017-06-08T07:32:48 | 2010-06-19T13:48:04 | Python | UTF-8 | Python | false | false | 1,411 | py | # Script to send off all queued email.
#
# This script is intended to be run frequently from cron. We queue things
# up in the db so that they get automatically rolled back as necessary,
# but once we reach this point we're just going to send all of them one
# by one.
#
from django.core.management.base import BaseCommand, CommandError
from django.db import connection
import smtplib
from hamnadmin.mailqueue.models import QueuedMail
class Command(BaseCommand):
    # Shown by `manage.py help`.
    help = 'Send queued mail'
    def handle(self, *args, **options):
        """Deliver every QueuedMail row via local SMTP, deleting each on success.

        Rows are only removed after a successful sendmail() call, so a
        delivery failure raises and leaves the remaining queue intact for
        the next cron run.
        """
        # Grab advisory lock, if available. Lock id is just a random number
        # since we only need to interlock against ourselves. The lock is
        # automatically released when we're done.
        curs = connection.cursor()
        curs.execute("SELECT pg_try_advisory_lock(72181378)")
        if not curs.fetchall()[0][0]:
            raise CommandError("Failed to get advisory lock, existing send_queued_mail process stuck?")
        for m in QueuedMail.objects.all():
            # Yes, we do a new connection for each run. Just because we can.
            # If it fails we'll throw an exception and just come back on the
            # next cron job. And local delivery should never fail...
            smtp = smtplib.SMTP("localhost")
            smtp.sendmail(m.sender, m.receiver, m.fullmsg.encode('utf-8'))
            smtp.close()
            m.delete()
| [
"[email protected]"
] | |
c476dffe1424e9b39be4b00edfc9aad451a77a0f | a7c4478e6fdec7cf1a5f22b9eba5e11afc537503 | /app/main/errors.py | 129fff301ce85ae5f1f6f5d546092fe5a73a525a | [] | no_license | deveshaggrawal19/waasle | 457fe686a18ce9d5162abc9b3fd5041d7938ee23 | 69e00a29175d0771d8ff920397dc08d37d3cc3dc | refs/heads/master | 2021-04-26T22:19:32.317110 | 2016-11-17T18:51:17 | 2016-11-17T18:51:17 | 71,818,197 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 186 | py | from flask import render_template
from ..main import main
@main.app_errorhandler(404) # Blueprint-wide handler: catches 404s raised anywhere in the app
def page_not_found(e):
    """Render the custom 404 page with the proper HTTP status code."""
    return render_template('404.html'), 404
"[email protected]"
] | |
ea193671f595da467bd64814a0cf7ba0d1e8566d | 74515f9e059aa8a73e63d735abbac69d99713c69 | /src/tournai/urban/dataimport/interfaces.py | c03599111093db2e481981427c2974da1bb70e5b | [] | no_license | IMIO/tournai.urban.dataimport_22 | b7285eaf15aec02dfa778881d4c53b02cfcc1466 | c1e9db3edeab1da154fdff2d078d88802ea7bb24 | refs/heads/master | 2020-12-30T16:02:30.853395 | 2018-03-23T14:14:23 | 2018-03-23T14:14:23 | 90,954,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 440 | py | # -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from plone.theme.interfaces import IDefaultPloneLayer
from imio.urban.dataimport.interfaces import IUrbanDataImporter
class ITournaiUrbanDataimportLayer(IDefaultPloneLayer):
    """Marker interface that defines a Zope 3 browser layer for this package."""
class ITournaiDataImporter(IUrbanDataImporter):
    """Marker interface for the Tournai Agorawin urban-data importer."""
| [
"[email protected]"
] | |
d98242cf54552fe3ac8c77d0b97a6bdf536e0756 | f93998e1c5c5c50bf20aed8d5b3517b12c333fdb | /wellsfargo/migrations/0003_auto_20160524_1127.py | 2d1bd6aa4fbbf69c580061fe78dd97723e4b7eab | [
"ISC"
] | permissive | pombredanne/django-oscar-wfrs | 637130651ab0d15289c4b3b3b86a42ada306fe96 | 991b79d2bd8a22512861bb3117c2bb5444c467b2 | refs/heads/master | 2021-01-17T23:41:10.424343 | 2016-05-28T01:20:30 | 2016-05-28T01:20:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,623 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-24 11:27
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds a nullable `user` FK ("Owner") to each
    # of the four credit-application models so applications can be tied to
    # an account. Do not edit by hand beyond comments.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wellsfargo', '0002_auto_20160523_2127'),
    ]
    operations = [
        migrations.AddField(
            model_name='cacreditapp',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credit_applications', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AddField(
            model_name='cajointcreditapp',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credit_applications', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AddField(
            model_name='uscreditapp',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credit_applications', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AddField(
            model_name='usjointcreditapp',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credit_applications', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
    ]
| [
"[email protected]"
] | |
6d5fc62c7c29032963e791bf480302878fd25bf3 | e70a5960b60bf6c11df4248625d0188ededdd4c7 | /Function/GUI/GUI_main/note_string.py | f13b73e15b9279706e1d53de8d8a68e661cfbc22 | [] | no_license | wx2000qq/MoDeng | 70be2802b6191855667cce5fe3cd89afda5fb9a9 | 9144bb79c237c0361b40f314b2c3123d58ac71cc | refs/heads/master | 2020-11-24T11:52:08.829630 | 2019-12-15T04:54:25 | 2019-12-15T04:54:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,525 | py | # encoding=utf-8
"""
将提示字符串规整在一起,防止在代码中占用太多地方
统一用note开头
"""
# 初始化图片提示
note_init_pic = \
"""
----------------------------------------------------------------------------------
小提示:
当前正在生成“三大指数”、“持仓stk”和“关注stk”的小时、日、周和月的MACD图!
以及日级别的KD、MOM、RSI、SAR等指标的图。
小时级别的MACD图半小时更新一次,其余图片启动软件时生成最新的,软件开启后暂不更新!
----------------------------------------------------------------------------------
""" + '\n'
# 软件初启动时的日线判断提示
note_day_analysis = \
"""
----------------------------------------------------------------------------------
小提示:
以下是日线的MACD拐点判断和SAR指标的判断。
红色:未来数日看多
绿色:未来数日看空
需要观察走势图做进一步的主观判断!
----------------------------------------------------------------------------------
""" + '\n'
note_middle_rank = \
"""
----------------------------------------------------------------------------------
小提示:
所谓“中期水平检测”是对自己的“持仓stk”和“关注stk”的当前价格在两个月内的水平
进行统计排名,由低到高排序,越在前面的,表示当前价格越是处于低位!
level这一列表示处于低位的实际情况,是一个0~100的数,比如12.2表示当前价格只超过了两
个月来12.2%的时间!
----------------------------------------------------------------------------------
""" + '\n'
note_macd_inflection_point = \
"""
----------------------------------------------------------------------------------
小提示:
所谓“拐点检测”是对自己的“持仓stk”和“关注stk”以及“三大指数”的小时级别和半
小时级别的MACD柱子进行分析,找出“开始上涨”和“开始下跌”的情况,在控制台向用户提
示,用户收到提示后可以查看其相应的MACD图,以便对价格走势做进一步的判断!
----------------------------------------------------------------------------------
""" + '\n'
note_sar_inflection_point = \
"""
----------------------------------------------------------------------------------
小提示:
当前正在对半小时指标进行拐点检测...
所谓“拐点检测”是对自己的“持仓stk”和“关注stk”以及“三大指数”的半
小时级别的SAR等指标进行分析,找出“开始上涨”和“开始下跌”的情况,在控制台向用户提
示,用户收到提示后可以查看其相应的指标图,以便对价格走势做进一步的判断!
----------------------------------------------------------------------------------
""" + '\n'
note_dengshen_welcome = \
"""
==================================================
小主您好,我是灯神,很乐意为您效劳!
我的技能:
@ 配置
---------
您可以通过输入
增加持仓 美的集团
删除持仓 美的集团
增加关注 格力电器
删除关注 格力电器
查看关注
查看持仓
来增删、查看 “持仓” 和 “关注”的股票
@ 预测明日大盘
-----------------
输入“预测明日大盘”可以预测明日上证、深证和创业板三大板指的 最高点 最低点 和 收盘价,
可对明日走势略窥一二。
@ 清理
----------
输入“清理”进行清屏
@ 帮助
--------
输入“帮助”打印命令帮助
==================================================
小主请吩咐:
"""
total_cmd = \
"""
所有命令(以美的集团为例)
==================================================
增加持仓 美的集团
删除持仓 美的集团
增加关注 美的集团
删除关注 美的集团
查看关注
查看持仓
查看b记录 美的集团
美的集团 买入 300 54.5 (以一股54.5块钱买入300股美的集团的股票)
美的集团 卖出 500 16.4
清理
帮助
预测大盘指数
==================================================
""" | [
"[email protected]"
] | |
2644c8ca38324e9a27a0a32fe48c7fa1e3a4b2ca | 9d8a3a2c0a15dbf1f90d801e6d705d1212cf09af | /services/web__rionegro_com_ar.py | cd01900105e6f20fc372cc37095e8cfcbc691854 | [] | no_license | rudolphos/NewsGrabber | f9bddc9a9b3a9e02f716133fd746f48cee635b36 | 86354fb769b2710ac7cdd5bd8795e43158b70ad2 | refs/heads/master | 2021-01-12T12:07:55.335079 | 2016-10-09T22:39:17 | 2016-10-09T22:39:17 | 72,316,773 | 0 | 0 | null | 2016-10-30T00:35:08 | 2016-10-30T00:35:08 | null | UTF-8 | Python | false | false | 213 | py | refresh = 4
# NewsGrabber service definition for rionegro.com.ar.
version = 20160403.01
# Seed pages the grabber (re)visits.
urls = ['http://www.rionegro.com.ar/',
        'http://www.rionegro.com.ar/diario/ultimas-noticias.aspx']
# URLs matching this pattern are considered part of the site.
regex = [r'^https?:\/\/[^\/]*rionegro\.com\.ar']
# No dedicated video or live-stream URL patterns for this site.
videoregex = []
liveregex = []
"[email protected]"
] | |
28bafc6808151dfca0608b676e7311af110fe7cd | 926b3c52070f6e309567c8598248fd5c57095be9 | /src/mmdeploy/mmdeploy/backend/ncnn/quant.py | 7bddda80b7addab282d7ccd92746cae829ba53ec | [
"Apache-2.0"
] | permissive | fengbingchun/PyTorch_Test | 410f7cd2303707b0141d433fb9d144a961e1f4c8 | df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348 | refs/heads/master | 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 | C++ | UTF-8 | Python | false | false | 2,119 | py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from subprocess import call
from typing import List
import mmcv
from .init_plugins import get_ncnn2int8_path
def get_quant_model_file(onnx_path: str, work_dir: str) -> List[str]:
    """Build the output paths used by ncnn int8 quantization.

    Args:
        onnx_path (str): The path to the fp32 onnx model.
        work_dir (str): The path to the directory for saving the results.

    Returns:
        List[str]: Paths for the quantized onnx model, the calibration
            table, the int8 param file and the int8 bin file, in that order.
    """
    # Make sure the destination directory exists before composing paths.
    mmcv.mkdir_or_exist(osp.abspath(work_dir))
    stem = osp.splitext(osp.basename(onnx_path))[0]
    suffixes = ['_quant.onnx', '.table', '_int8.param', '_int8.bin']
    return [osp.join(work_dir, stem + suffix) for suffix in suffixes]
def ncnn2int8(param: str, bin: str, table: str, int8_param: str,
              int8_bin: str):
    """Convert an ncnn float model into a quantized int8 model.

    ncnn ships an executable (`ncnn2int8`) that takes the float graph and
    weights plus a calibration table and writes the int8 graph and weights;
    this helper simply locates and invokes it.

    Example:
        >>> from mmdeploy.backend.ncnn.quant import ncnn2int8
        >>> param = 'work_dir/end2end.param'
        >>> bin = 'work_dir/end2end.bin'
        >>> table = 'work_dir/end2end.table'
        >>> int8_param = 'work_dir/end2end_int8.param'
        >>> int8_bin = 'work_dir/end2end_int8.bin'
        >>> ncnn2int8(param, bin, table, int8_param, int8_bin)

    Args:
        param (str): The path of ncnn float model graph.
        bin (str): The path of ncnn float model weight.
        table (str): The path of ncnn calibration table.
        int8_param (str): The path of ncnn low bit model graph.
        int8_bin (str): The path of ncnn low bit model weight.
    """
    # Local name deliberately differs from the function name to avoid
    # shadowing it.
    converter = get_ncnn2int8_path()
    call([converter, param, bin, int8_param, int8_bin, table])
| [
"[email protected]"
] | |
612a9cfe6c7e2307b32cf0a91d982b8221012697 | 5a648d5c62e640a8df8d18549eaf6e84a36dbd28 | /findk.py | eb35c580c67910fc18867b2273ac68599fcf99ef | [
"MIT"
] | permissive | quake0day/oj | f5f8576f765a76f0f3a8b2c559db06279e93ef25 | c09333d1738f8735de0d5d825db6f4b707585670 | refs/heads/master | 2021-01-21T04:27:34.035319 | 2016-03-30T02:19:15 | 2016-03-30T02:19:15 | 30,592,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | a = "leetcode"
b = "codyabs"
k = 3
def findK(a, b, k):
    """Return True if strings `a` and `b` share a common substring of length `k`.

    Hashes every length-k substring of `a`, then probes with every length-k
    substring of `b`.  O(m + n) substrings, each of length k.

    Fixes: the original iterated `range(m-k)` / `range(n-k)`, which skipped
    the final length-k substring of each string, and fell off the end
    returning None instead of False.
    """
    m = len(a)
    n = len(b)
    # No substring of length k exists in either string.
    if k <= 0 or k > m or k > n:
        return False
    h = {}
    # +1 so the substring ending at the last character is included.
    for i in range(m - k + 1):
        h[a[i:i + k]] = True
    for j in range(n - k + 1):
        if b[j:j + k] in h:
            return True
    return False
def findKDP(a, b, k):
    """Return True if `a` and `b` share a common substring of length >= `k` (DP).

    dp[i][j] is the length of the longest common suffix of a[:i] and b[:j];
    short-circuits as soon as any cell reaches `k`.

    Fixes: the original looped `range(1, m)` / `range(1, n)`, never comparing
    the last character of either string.
    """
    m = len(a)
    n = len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    # m+1 / n+1 so a[m-1] and b[n-1] participate in the comparison.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] >= k:
                    return True
    return False
print findK(a,b,k)
print findKDP(a,b,k) | [
"[email protected]"
] | |
7b5f1ea57196f64b077d3972a4c3dce973abb7c2 | 7d8a4d58fc4c5a73ce8c85e513253a86d6290d3b | /plugin.video.destinyds/default.py | 1b500fb03a19198561fad5bb03af7b19238f1903 | [] | no_license | bopopescu/icon | cda26d4463d264b7e2080da51f29d84cc48dfb81 | e385a6225dd11b7fea5a11215d655cf5006bb018 | refs/heads/master | 2022-01-12T19:00:04.951604 | 2019-07-10T05:35:44 | 2019-07-10T05:35:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610,962 | py | # -*- coding: utf-8 -*-
import xbmcaddon,os,xbmc,xbmcgui,urllib,urllib2,re,xbmcplugin,sys,logging,time,random
from os import listdir
from os.path import isfile, join
Addon = xbmcaddon.Addon()
import pyxbmct
import requests,json
import xbmcvfs
import koding
import socket
import threading
global done_nextup
global all_data_imdb
global susb_data,susb_data_next
global all_s_in
global stop_window
global stop_try_play,global_result
global playing_text,mag_start_time_new
global now_playing_server,stop_all,close_on_error
global wait_for_subs
global in_next_ep
in_next_ep=0
wait_for_subs=0
close_on_error=0
global done1,done1_1,close_sources_now,once_fast_play
once_fast_play=0
close_sources_now=0
done1_1=0
done1=0
stop_all=0
now_playing_server=''
mag_start_time_new=0
global_result=''
playing_text=''
stop_try_play=False
stop_window=False
all_s_in=({},0,'','','')
all_data_imdb=[]
done_nextup=0
addonPath = xbmc.translatePath(Addon.getAddonInfo("path")).decode("utf-8")
libDir = os.path.join(addonPath, 'resources', 'lib')
sys.path.append( libDir)
libDir = os.path.join(addonPath, 'resources', 'lib2')
sys.path.append( libDir)
libDir = os.path.join(addonPath, 'resources', 'plugins')
sys.path.append( libDir)
libDir = os.path.join(addonPath, 'resources', 'solvers')
sys.path.append( libDir)
libDir = os.path.join(addonPath, 'resources', 'solvers','resolver')
sys.path.append( libDir)
libDir = os.path.join(addonPath, 'resources', 'solvers','torrentool')
sys.path.append( libDir)
done_dir = os.path.join(addonPath, 'resources', 'done')
sys.path.append( done_dir)
sys.path.append( libDir)
rd_dir = os.path.join(addonPath, 'resources', 'done','rd')
sys.path.append( rd_dir)
mag_dir = os.path.join(addonPath, 'resources', 'done','magnet')
sys.path.append( mag_dir)
libDir = os.path.join(addonPath, 'resources', 'scrapers')
sys.path.append( libDir)
m3_dir = os.path.join(addonPath, 'resources', 'm3u8')
BASE_LOGO=os.path.join(addonPath, 'resources', 'logos/')
tmdb_data_dir = os.path.join(addonPath, 'resources', 'tmdb_data')
debug_mode=False
if Addon.getSetting("debugmode")=='true':
debug_mode=True
lan=xbmc.getInfoLabel('System.Language')
from general import res_q,clean_name,check_link,server_data,replaceHTMLCodes,domain_s,similar,cloudflare_request,fix_q,call_trakt,post_trakt,reset_trakt,cloudflare_request,base_header
import cache as cache
import PTN as PTN
__PLUGIN_PATH__ = Addon.getAddonInfo('path')
from globals import *
from tmdb import *
from addall import addNolink,addDir3,addLink
DESIMG=os.path.join(addonPath,'fanart.jpg')
socket.setdefaulttimeout(40.0)
global imdb_global,search_done,silent_mode,close_all,list_index,all_links_sources
all_links_sources={}
search_done=0
list_index=999
silent_mode=False
if debug_mode==False:
reload(sys)
sys.setdefaultencoding('utf8')
imdb_global=' '
rd_sources=Addon.getSetting("rdsource")
allow_debrid = rd_sources == "true"
ACTION_PREVIOUS_MENU = 10 ## ESC action
ACTION_NAV_BACK = 92 ## Backspace action
ACTION_MOVE_LEFT = 1 ## Left arrow key
ACTION_MOVE_RIGHT = 2 ## Right arrow key
ACTION_MOVE_UP = 3 ## Up arrow key
ACTION_MOVE_DOWN = 4 ## Down arrow key
ACTION_MOUSE_WHEEL_UP = 104 ## Mouse wheel up
ACTION_MOUSE_WHEEL_DOWN = 105 ## Mouse wheel down
ACTION_MOVE_MOUSE = 107 ## Down arrow key
ACTION_SELECT_ITEM = 7 ## Number Pad Enter
ACTION_BACKSPACE = 110 ## ?
ACTION_MOUSE_LEFT_CLICK = 100
ACTION_MOUSE_LONG_CLICK = 108
def TextBox_help(title, msg):
    """Show a modal help dialog (Textbox.xml) with a random background video.

    Displays `title` and scrollable `msg` text, starts a random YouTube clip
    (windowed) behind the dialog, and stops playback when the dialog closes.
    Blocks until the user dismisses it.
    """
    class TextBoxes(xbmcgui.WindowXMLDialog):
        def onInit(self):
            # Control ids from Textbox.xml.
            self.title = 101
            self.msg = 102
            self.scrollbar = 103
            self.okbutton = 201
            self.imagecontrol=202
            self.y=0
            self.showdialog()
        def showdialog(self):
            import random
            # `title` and `msg` are closed over from the enclosing function.
            self.getControl(self.title).setLabel(title)
            self.getControl(self.msg).setText(msg)
            self.getControl(self.imagecontrol).setImage("https://wallpaperstock.net/wallpapers/thumbs1/36550hd.jpg")
            self.setFocusId(self.scrollbar)
            # Hard-coded pool of YouTube video ids; one is picked at random
            # and played windowed behind the dialog.
            all_op=['fJ9rUzIMcZQ','HgzGwKwLmgM','RNoPdAq666g','9f06QZCVUHg','s6TtwR2Dbjg','yt7tUJIK9FU','NJsa6-y4sDs','Nq8TasNsgKw','0pibtxAO00I','jkPl0e8DlKc','WQnAxOQxQIU']
            random.shuffle(all_op)
            from youtube_ext import get_youtube_link2
            if all_op[0]!=None:
                try:
                    f_play= get_youtube_link2('https://www.youtube.com/watch?v='+all_op[0]).replace(' ','%20')
                    xbmc.Player().play(f_play,windowed=True)
                except Exception as e:
                    # Best effort: the help dialog still works without video.
                    pass
            xbmc.executebuiltin("Dialog.Close(busydialog)")
        def onClick(self, controlId):
            # OK button closes the dialog and stops the background video.
            if (controlId == self.okbutton):
                xbmc.Player().stop()
                self.close()
        def onAction(self, action):
            # ESC / Back also close and stop playback.
            if action == ACTION_PREVIOUS_MENU:
                xbmc.Player().stop()
                self.close()
            elif action == ACTION_NAV_BACK:
                xbmc.Player().stop()
                self.close()
    tb = TextBoxes( "Textbox.xml" , Addon.getAddonInfo('path'), 'DefaultSkin', title=title, msg=msg)
    tb.doModal()
    del tb
# You can add \n to do line breaks
CONTACT = 'test'
#Images used for the contact window. http:// for default icon and fanart
CONTACTICON = BASE_LOGO+'icon.png'
CONTACTFANART = 'http://'
ADDONTITLE = 'Destiny of Deathstar'
COLOR1 = 'gold'
COLOR2 = 'white'
COLOR3 = 'red'
COLOR3 = 'blue'
# Primary menu items / %s is the menu item and is required
THEME1 = '[COLOR '+COLOR2+']%s[/COLOR]'
# Build Names / %s is the menu item and is required
THEME2 = '[COLOR '+COLOR2+']%s[/COLOR]'
# Alternate items / %s is the menu item and is required
THEME3 = '[COLOR '+COLOR1+']%s[/COLOR]'
# Current Build Header / %s is the menu item and is required
THEME4 = '[COLOR '+COLOR1+']%s[/COLOR] [COLOR '+COLOR2+']:[/COLOR]'
# Current Theme Header / %s is the menu item and is required
THEME5 = '[COLOR '+COLOR1+']Current Theme:[/COLOR] [COLOR '+COLOR2+']%s[/COLOR]'
ACTION_PREVIOUS_MENU = 10
ACTION_SELECT_ITEM = 7
ACTION_MOVE_UP = 3
ACTION_MOVE_DOWN = 4
ACTION_STEP_BACK = 21
ACTION_NAV_BACK = 92
ACTION_MOUSE_RIGHT_CLICK = 101
ACTION_MOUSE_MOVE = 107
ACTION_BACKSPACE = 110
KEY_BUTTON_BACK = 275
def contact(title='',msg=""):
class MyWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.title = THEME3 % kwargs["title"]
self.image = kwargs["image"]
self.fanart = kwargs["fanart"]
self.msg = THEME2 % kwargs["msg"]
def onInit(self):
self.fanartimage = 101
self.titlebox = 102
self.imagecontrol = 103
self.textbox = 104
self.scrollcontrol = 105
self.button = 199
self.showdialog()
def showdialog(self):
self.getControl(self.imagecontrol).setImage(self.image)
self.getControl(self.fanartimage).setImage(self.fanart)
self.getControl(self.fanartimage).setColorDiffuse('9FFFFFFF')
self.getControl(self.textbox).setText(self.msg)
self.getControl(self.titlebox).setLabel(self.title)
self.setFocusId(self.button)
def onAction(self,action):
if action == ACTION_PREVIOUS_MENU: self.close()
elif action == ACTION_NAV_BACK: self.close()
cw = MyWindow( "Contact.xml" , Addon.getAddonInfo('path'), 'DefaultSkin', title=title, fanart=CONTACTFANART, image=CONTACTICON, msg=msg)
cw.doModal()
del cw
FILENAME='contextmenu.xml'
ACTION_BACK = 92
ACTION_PARENT_DIR = 9
ACTION_PREVIOUS_MENU = 10
ACTION_CONTEXT_MENU = 117
ACTION_C_KEY = 122
ACTION_LEFT = 1
ACTION_RIGHT = 2
ACTION_UP = 3
ACTION_DOWN = 4
class ContextMenu(xbmcgui.WindowXMLDialog):
    """Modal source-selection list (contextmenu.xml).

    `menu` is a list of items shaped [title, server, subs, quality,
    provider, size, link, is_rd]; after doModal(), `self.params` holds the
    selected row index, or 888 if the user backed out.
    """
    def __new__(cls, addonID, menu,icon,fan,txt):
        # WindowXMLDialog takes its skin file in __new__, not __init__.
        FILENAME='contextmenu.xml'
        return super(ContextMenu, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID, menu,icon,fan,txt):
        super(ContextMenu, self).__init__()
        self.menu = menu
        # Control ids used by contextmenu.xml.
        self.imagecontrol=101
        self.bimagecontrol=5001
        self.txtcontrol=2
        self.icon=icon
        self.fan=fan
        self.text=txt
    def onInit(self):
        """Populate the list control from `self.menu` (mutates items in place)."""
        line = 38
        spacer = 20
        delta = 0
        nItem = len(self.menu)
        if nItem > 16:
            nItem = 16
            delta = 1
        self.getControl(self.imagecontrol).setImage(self.icon)
        self.getControl(self.bimagecontrol).setImage(self.fan)
        self.getControl(self.txtcontrol).setText(self.text)
        height = (line+spacer) + (nItem*line)
        # NOTE(review): computed height is immediately overwritten with a
        # fixed value, so line/spacer/delta above are effectively dead.
        height=1100
        self.getControl(5001).setHeight(height)
        self.list = self.getControl(3000)
        self.list.setHeight(height)
        newY = 360 - (height/2)
        self.getControl(5000).setPosition(self.getControl(5000).getX(), 0)
        self.params = None
        self.paramList = []
        #txt='[COLOR lightseagreen]'+name.replace('-',' ').replace('%20',' ').strip()+'[/COLOR]\nServer: '+server+' Subs: '+str(pre_n)+' Quality:[COLOR gold] ◄'+q+'► [/COLOR]Provider: [COLOR lightblue]'+supplay+'[/COLOR] Size:[COLOR coral]'+size+'[/COLOR]$$$$$$$'+link
        #import textwrap
        for item in self.menu:
            # item[6] is the playable link; kept parallel to list rows.
            self.paramList.append(item[6])
            # Wrap long titles onto two lines.
            if len(item[0])>60:
                # item[0]="\n".join(textwrap.wrap(item[0],60))
                item[0]=item[0][0:60]+'\n'+item[0][60:len(item[0])]
            add_rd=''
            logging.warning('rd status:')
            # item[7] flags a Real-Debrid link.
            if item[7]:
                add_rd='[COLOR gold]RD- [/COLOR]'
            title =add_rd+'[COLOR lightseagreen][B]'+item[0] +'[/B][/COLOR]'
            # Replace empty metadata fields with a placeholder.
            if len(item[1].strip())<2:
                item[1]='--'
            if len(item[2].strip())<2:
                item[2]='--'
            if len(item[3].strip())<2:
                item[3]='--'
            if len(item[4])<2:
                item[4]='--'
            if len(item[5])<2:
                item[5]='--'
            server=item[1]
            pre_n='[COLOR khaki]'+item[2]+'[/COLOR]'
            q=item[3]
            supplay='[COLOR lightblue]'+item[4]+'[/COLOR]'
            size='[COLOR coral]'+item[5]+'[/COLOR]'
            link=item[6]
            liz = xbmcgui.ListItem(title)
            # Skin reads these properties to render the extra columns.
            liz.setProperty('server', server)
            liz.setProperty('pre',pre_n)
            liz.setProperty('Quality', q)
            liz.setProperty('supply', supplay)
            liz.setProperty('size', size)
            self.list.addItem(liz)
        self.setFocus(self.list)
    def onAction(self, action):
        """Back/ESC/context actions cancel the dialog (params = 888)."""
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            self.params = 888
            xbmc.sleep(100)
            return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            self.params = 888
            return self.close()
    def onClick(self, controlId):
        """Record the clicked row index in `self.params` and close."""
        if controlId != 3001:
            index = self.list.getSelectedPosition()
            try: self.params = index
            except: self.params = None
            self.close()
    def onFocus(self, controlId):
        pass
def get_trailer_f(id,tv_movie):
    """Resolve a random trailer stream URL for a TMDB movie/show id.

    Queries TMDB's /videos endpoint (English first, then any language),
    picks a random result and resolves its YouTube key to a direct URL.
    Returns the resolved URL, '' on resolve failure, or 0 when TMDB has no
    videos for the id.
    """
    import random
    try:
        # Sentinel so the outer except's logging of html_t never NameErrors.
        html_t='99'
        logging.warning('Get Trailer')
        if tv_movie=='movie':
            url_t='http://api.themoviedb.org/3/movie/%s/videos?api_key=1248868d7003f60f2386595db98455ef&language=en'%id
        else:
            url_t='http://api.themoviedb.org/3/tv/%s/videos?api_key=1248868d7003f60f2386595db98455ef&language=en'%id
        html_t=requests.get(url_t).json()
        if len(html_t['results'])==0:
            # No English videos; retry without the language filter.
            if tv_movie=='movie':
                url_t='http://api.themoviedb.org/3/movie/%s/videos?api_key=1248868d7003f60f2386595db98455ef'%id
            else:
                url_t='http://api.themoviedb.org/3/tv/%s/videos?api_key=1248868d7003f60f2386595db98455ef'%id
            html_t=requests.get(url_t).json()
        else:
            logging.warning(html_t)
        # Pick one video at random from whatever list we ended up with.
        if len(html_t['results'])>0:
            vid_num=random.randint(0,len(html_t['results'])-1)
        else:
            return 0
        # NOTE(review): assumes every result is a YouTube video ('key' is a
        # YouTube id) — TMDB can also return Vimeo entries; confirm upstream.
        video_id=(html_t['results'][vid_num]['key'])
        #from pytube import YouTube
        #playback_url = YouTube(domain_s+'www.youtube.com/watch?v='+video_id).streams.first().download()
        playback_url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % video_id
        from youtube_ext import get_youtube_link2
        if video_id!=None:
            try:
                return get_youtube_link2('https://www.youtube.com/watch?v='+video_id).replace(' ','%20')
            except Exception as e:
                logging.warning(e)
                return ''
        else:
            return ''
        # NOTE(review): unreachable — both branches above return, so the
        # youtube-plugin fallback URL is never used.
        return playback_url
    except Exception as e:
        # Report the failing line via a Kodi notification, then give up.
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Line:'+str(lineno)+' E:'+str(e))).encode('utf-8'))
        logging.warning('ERROR IN Trailer :'+str(lineno))
        logging.warning('inline:'+line)
        logging.warning(e)
        logging.warning(html_t)
        logging.warning('BAD Trailer play')
        return ''
class ContextMenu_new(xbmcgui.WindowXMLDialog):
    """Variant of ContextMenu using contextmenu_new.xml.

    Adds section headers ('►►►' rows), Real-Debrid/magnet badges via list
    item properties, and cancel-on-background-click. After doModal(),
    `self.params` is the selected row index or 888 on cancel.
    """
    def __new__(cls, addonID, menu,icon,fan,txt):
        # WindowXMLDialog takes its skin file in __new__, not __init__.
        FILENAME='contextmenu_new.xml'
        return super(ContextMenu_new, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID, menu,icon,fan,txt):
        super(ContextMenu_new, self).__init__()
        self.menu = menu
        # Control ids used by contextmenu_new.xml.
        self.imagecontrol=101
        self.bimagecontrol=5001
        self.txtcontrol=2
        self.icon=icon
        self.fan=fan
        self.text=txt
    def onInit(self):
        """Populate the list control from `self.menu` (mutates items in place)."""
        line = 38
        spacer = 20
        delta = 0
        nItem = len(self.menu)
        if nItem > 16:
            nItem = 16
            delta = 1
        self.getControl(self.imagecontrol).setImage(self.icon)
        self.getControl(self.bimagecontrol).setImage(self.fan)
        self.getControl(self.txtcontrol).setText(self.text)
        height = (line+spacer) + (nItem*line)
        # NOTE(review): computed height is immediately overwritten with a
        # fixed value, so line/spacer/delta above are effectively dead.
        height=1100
        self.getControl(5001).setHeight(height)
        self.list = self.getControl(3000)
        self.list.setHeight(height)
        newY = 360 - (height/2)
        self.getControl(5000).setPosition(self.getControl(5000).getX(), 0)
        self.params = None
        self.paramList = []
        #txt='[COLOR lightseagreen]'+name.replace('-',' ').replace('%20',' ').strip()+'[/COLOR]\nServer: '+server+' Subs: '+str(pre_n)+' Quality:[COLOR gold] ◄'+q+'► [/COLOR]Provider: [COLOR lightblue]'+supplay+'[/COLOR] Size:[COLOR coral]'+size+'[/COLOR]$$$$$$$'+link
        #import textwrap
        for item in self.menu:
            # item[6] is the playable link; kept parallel to list rows.
            self.paramList.append(item[6])
            '''
            info=(PTN.parse(item[0]))
            if 'excess' in info:
                if len(info['excess'])>0:
                    item[0]='.'.join(info['excess'])
            '''
            # '►►►' marks a section-header row; never wrap those.
            if len(item[0])>45 and '►►►' not in item[0]:
                # item[0]="\n".join(textwrap.wrap(item[0],60))
                item[0]=item[0][0:45]+'\n'+item[0][45:len(item[0])]
            title ='[COLOR lightseagreen][B]'+item[0] +'[/B][/COLOR]'
            # Replace empty metadata fields with a placeholder.
            if len(item[1].strip())<2:
                item[1]='--'
            if len(item[2].strip())<2:
                item[2]='--'
            if len(item[3].strip())<2:
                item[3]='--'
            if len(item[4])<2:
                item[4]='--'
            if len(item[5])<2:
                item[5]='--'
            server=item[1]
            pre_n='[COLOR khaki]'+item[2]+'[/COLOR]'
            q=item[3]
            supplay='[COLOR lightblue]'+item[4]+'[/COLOR]'
            size='[COLOR coral]'+item[5]+'[/COLOR]'
            link=item[6]
            # item[7] flags Real-Debrid; magnets also count when debrid is on.
            if item[7]==True or ('magnet' in server and allow_debrid):
                supplay='[COLOR gold]RD - '+supplay+'[/COLOR]'
            # Header rows show their text in the provider column only.
            if '►►►' in item[0]:
                title=''
                supplay=item[0]
            liz = xbmcgui.ListItem(title)
            # Skin reads these properties to render the extra columns/badges.
            liz.setProperty('server', server)
            liz.setProperty('pre',pre_n)
            liz.setProperty('Quality', q)
            liz.setProperty('supply', supplay)
            liz.setProperty('size', size)
            if '►►►' not in item[0]:
                liz.setProperty('server_v','100')
            if item[7]==True or ('magnet' in server and allow_debrid):
                liz.setProperty('rd', '100')
            if 'magnet' in server or 'torrent' in server.lower():
                liz.setProperty('magnet', '100')
            self.list.addItem(liz)
        self.setFocus(self.list)
    def onAction(self, action):
        """Back/ESC/context actions cancel the dialog (params = 888)."""
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            self.params = 888
            xbmc.sleep(100)
            return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            self.params = 888
            return self.close()
    def onClick(self, controlId):
        """Record the clicked row index (or 888 for background clicks) and close."""
        if controlId != 3001:
            index = self.list.getSelectedPosition()
            try: self.params = index
            except: self.params = None
        else:
            self.params = 888
        self.close()
    def onFocus(self, controlId):
        pass
class ContextMenu_new2(xbmcgui.WindowXMLDialog):
def __new__(cls, addonID, menu,icon,fan,txt):
FILENAME='contextmenu_new2.xml'
return super(ContextMenu_new2, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
def __init__(self, addonID, menu,icon,fan,txt):
global playing_text
super(ContextMenu_new2, self).__init__()
self.menu = menu
self.auto_play=0
self.params = 666666
self.imagecontrol=101
self.bimagecontrol=5001
self.txtcontrol=2
self.icon=icon
self.fan=fan
self.text=txt
playing_text=''
self.tick=60
self.done=0
self.story_gone=0
self.count_p=0
self.keep_play=''
self.tick=60
self.s_t_point=0
self.start_time=time.time()
def background_work(self):
global playing_text,mag_start_time_new,now_playing_server,done1
tick=0
tick2=0
changed=1
vidtime=0
while(1):
all_t=[]
for thread in threading.enumerate():
if ('tick_time' in thread.getName()) or ('background_task' in thread.getName()) or ('get_similer' in thread.getName()) or ('MainThread' in thread.getName()) or ('sources_s' in thread.getName()):
continue
if (thread.isAlive()):
all_t.append( thread.getName())
self.getControl(606).setLabel(','.join(all_t))
if xbmc.getCondVisibility('Window.IsActive(busydialog)'):
self.getControl(102).setVisible(True)
if tick2==1:
self.getControl(505).setVisible(True)
tick2=0
else:
self.getControl(505).setVisible(False)
tick2=1
else:
self.getControl(102).setVisible(False)
self.getControl(505).setVisible(False)
if len(playing_text)>0 or self.story_gone==1 :
changed=1
vidtime=0
if xbmc.Player().isPlaying():
vidtime = xbmc.Player().getTime()
t=time.strftime("%H:%M:%S", time.gmtime(vidtime))
if len(playing_text)==0:
playing_text=self.keep_play
try:
self.keep_play=playing_text
self.getControl(self.txtcontrol).setText(t+'\n'+playing_text.split('$$$$')[0]+'\n'+now_playing_server.split('$$$$')[0]+'\n'+now_playing_server.split('$$$$')[1])
if vidtime == 0:
if tick==1:
self.getControl(303).setVisible(True)
tick=0
else:
self.getControl(303).setVisible(False)
tick=1
except Exception as e:
logging.warning('Skin ERR:'+str(e))
self.params = 888
self.done=1
logging.warning('Close:4')
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
done1_1=3
self.close()
pass
elif changed==1:
changed=0
self.getControl(303).setVisible(False)
self.getControl(self.txtcontrol).setText(self.text)
if self.done==1:
break
if xbmc.Player().isPlaying():
self.tick=60
self.count_p+=1
self.st_time=0
vidtime = xbmc.Player().getTime()
if self.s_t_point==0:
if vidtime > 0:
self.getControl(3000).setVisible(False)
self.getControl(self.imagecontrol).setVisible(False)
self.getControl(505).setVisible(False)
self.getControl(909).setPosition(1310, 40)
self.getControl(2).setPosition(1310, 100)
self.s_t_point=1
self.getControl(303).setVisible(False)
self.story_gone=1
logging.warning('Change Seek Time:'+str(mag_start_time_new))
try:
if int(float(mag_start_time_new))>0:
xbmc.Player().seekTime(int(float(mag_start_time_new)))
except:
pass
if vidtime > 0:
playing_text=''
try:
value_d=(vidtime-(int(float(mag_start_time_new))))
except:
value_d=vidtime
play_time=int(Addon.getSetting("play_full_time"))
if value_d> play_time and self.s_t_point>0 :
self.params = 888
self.done=1
logging.warning('Close:1')
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
done1_1=3
self.close()
if self.count_p>(play_time+30) :
if Addon.getSetting("play_first")!='true':
self.params = 888
self.done=1
logging.warning('Close:3')
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
done1_1=3
self.close()
else:
self.count_p=0
self.s_t_point=0
self.getControl(3000).setVisible(True)
#self.getControl(505).setVisible(True)
self.getControl(self.imagecontrol).setVisible(True)
self.story_gone=0
self.getControl(2).setPosition(1310, 700)
self.getControl(909).setPosition(1310, 10)
xbmc.sleep(1000)
def tick_time(self):
global done1_1
while(self.tick)>0:
self.getControl(self.tick_label).setLabel(str(self.tick))
self.tick-=1
if self.params == 888:
break
xbmc.sleep(1000)
if self.params != 888:
self.params = 888
self.done=1
logging.warning('Close:93')
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
done1_1=3
self.close()
    def onInit(self):
        """Build the source-selection list when the window appears.

        Stops playback, starts the background worker and the countdown
        watchdog threads, then turns every entry of ``self.menu`` into a
        ListItem with colour/flag properties (quality, provider, debrid,
        history, magnet) for the skin to render.
        """
        xbmc.Player().stop()
        xbmc.executebuiltin('Dialog.Close(busydialog)')
        # Spawn the two helper threads that run alongside this dialog.
        thread=[]
        thread.append(Thread(self.background_work))
        thread[len(thread)-1].setName('background_task')
        thread.append(Thread(self.tick_time))
        thread[len(thread)-1].setName('tick_time')
        thread[0].start()
        thread[1].start()
        line = 38
        spacer = 20
        delta = 0
        nItem = len(self.menu)
        if nItem > 16:
            nItem = 16
            delta = 1
        self.getControl(self.imagecontrol).setImage(self.icon)
        self.getControl(self.bimagecontrol).setImage(self.fan)
        if len(playing_text)==0:
            self.getControl(self.txtcontrol).setText(self.text)
        # Computed height is immediately overridden with a fixed value.
        height = (line+spacer) + (nItem*line)
        height=1100
        self.getControl(5001).setHeight(height)
        self.list = self.getControl(3000)
        self.list.setHeight(height)
        newY = 360 - (height/2)
        self.getControl(5000).setPosition(self.getControl(5000).getX(), 0)
        self.paramList = []
        #txt='[COLOR lightseagreen]'+name.replace('-',' ').replace('%20',' ').strip()+'[/COLOR]\nServer: '+server+' Subs: '+str(pre_n)+' Quality:[COLOR gold] ◄'+q+'► [/COLOR]Provider: [COLOR lightblue]'+supplay+'[/COLOR] Size:[COLOR coral]'+size+'[/COLOR]$$$$$$$'+link
        #import textwrap
        all_liz_items=[]
        count=0
        # Links already played before (history table) get a 'history' flag.
        dbcur.execute("SELECT * FROM historylinks")
        all_his_links_pre = dbcur.fetchall()
        all_his_links=[]
        for link,status,option in all_his_links_pre:
            all_his_links.append(link)
        logging.warning('Loading')
        for item in self.menu:
            # item layout: [title, server, subs, quality, provider, size, link, rd_flag]
            self.getControl(202).setLabel(str(((count*100)/len(self.menu))) + '% Please Wait ')
            count+=1
            self.paramList.append(item[6])
            '''
            info=(PTN.parse(item[0]))
            if 'excess' in info:
                if len(info['excess'])>0:
                    item[0]='.'.join(info['excess'])
            '''
            golden=False
            if 'Cached ' in item[0]:
                golden=True
            o_title=item[0].replace('Cached ','')
            item[0]=item[0].replace('magnet','').replace('torrent','').replace('Cached ','')
            if len(item[0])>45 and '►►►' not in item[0]:
                # Wrap overly long titles onto two lines.
                # item[0]="\n".join(textwrap.wrap(item[0],60))
                item[0]=item[0][0:45]+'\n'+item[0][45:len(item[0])]
            title ='[COLOR lightseagreen][B]'+item[0] +'[/B][/COLOR]'
            # Normalise empty fields to placeholders for the skin.
            if len(item[1].strip())<2:
                item[1]='--'
            if len(item[2].strip())<2:
                item[2]=''
            if len(item[3].strip())<2:
                item[3]='--'
            if len(item[4])<2:
                item[4]='--'
            if len(item[5])<2:
                item[5]='--'
            server=item[1]
            pre_n='[COLOR khaki]'+item[2]+'[/COLOR]'
            q=item[3]
            supplay='[COLOR lightblue]'+item[4].replace('P-0/','')+'[/COLOR]'
            size='[COLOR coral]'+item[5]+'[/COLOR]'
            link=item[6]
            if item[7]==True or ('magnet' in o_title and allow_debrid):
                # Real-Debrid capable link.
                supplay='[COLOR gold]RD - '+supplay+'[/COLOR]'
            if '►►►' in item[0]:
                # Section separator row, not a playable source.
                title=''
                supplay=item[0]
            if q=='2160':
                q='4k'
            liz = xbmcgui.ListItem(title)
            liz.setProperty('server', server)
            liz.setProperty('pre',pre_n)
            liz.setProperty('Quality', q)
            liz.setProperty('supply', supplay)
            liz.setProperty('size', size)
            if item[6].encode('base64') in all_his_links:
                liz.setProperty('history','100')
            if '►►►' not in item[0]:
                liz.setProperty('server_v','100')
            if item[7]==True or (('magnet' in o_title or 'torrent' in supplay.lower()) and allow_debrid):
                liz.setProperty('rd', '100')
            if golden:
                liz.setProperty('magnet', '200')
            elif 'magnet' in o_title or 'torrent' in supplay.lower():
                liz.setProperty('magnet', '100')
            all_liz_items.append(liz)
        logging.warning(' Done Loading')
        self.getControl(202).setLabel('')
        self.list.addItems(all_liz_items)
        self.setFocus(self.list)
    def played(self):
        # Callback flag: 7777 signals the caller that playback started.
        self.params =7777
    def onAction(self, action):
        """Handle remote/keyboard input.

        Navigation keys re-show the list UI; context-menu and back keys
        close the dialog with ``params = 888`` (meaning "nothing picked").
        """
        global done1_1
        actionId = action.getId()
        # Any key press resets the auto-close countdown (see tick_time).
        self.tick=60
        logging.warning('ACtion:'+ str(actionId))
        if actionId in [ACTION_LEFT,ACTION_RIGHT ,ACTION_UP,ACTION_DOWN ]:
            # Navigation: restore the list/poster layout.
            self.getControl(3000).setVisible(True)
            #self.getControl(505).setVisible(True)
            self.getControl(self.imagecontrol).setVisible(True)
            self.getControl(1005).setVisible(False)
            self.story_gone=0
            self.getControl(2).setPosition(1310, 700)
            self.getControl(909).setPosition(1310, 10)
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            logging.warning('Close:5')
            self.params = 888
            xbmc.sleep(100)
            xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
            self.done=1
            logging.warning('action1 Closing')
            done1_1=3
            return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK,ACTION_NAV_BACK]:
            self.params = 888
            logging.warning('Close:6')
            xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
            self.done=1
            logging.warning('action2 CLosing')
            done1_1=3
            return self.close()
def wait_for_close(self):
global done1
timer=0
while(done1!=1):
if timer>10:
break
timer+=1
self.params = 888
self.done=1
xbmc.sleep(200)
if timer>10:
done1_1=3
self.close()
    def onClick(self, controlId):
        """Handle a click: any list item returns its index via ``self.params``;
        control 3001 (the cancel button) starts the shutdown handshake."""
        global playing_text,done1
        # Reset the auto-close countdown on interaction.
        self.tick=60
        if controlId != 3001:
            '''
            self.getControl(3000).setVisible(False)
            self.getControl(102).setVisible(False)
            self.getControl(505).setVisible(False)
            self.getControl(909).setPosition(1310, 40)
            self.getControl(2).setPosition(1310, 100)
            self.getControl(self.imagecontrol).setVisible(False)
            self.getControl(303).setVisible(False)
            self.story_gone=1
            '''
            # Selected a source: hand its list index back to the caller.
            index = self.list.getSelectedPosition()
            try: self.params = index
            except: self.params = None
            playing_text=''
            xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
            return self.params
        else:
            logging.warning('Close:7')
            self.params = 888
            self.done=1
            #while(done1==0):
            #    self.params = 888
            #    self.done=1
            #    xbmc.sleep(100)
            # Close asynchronously so the UI thread is not blocked.
            thread=[]
            thread.append(Thread(self.wait_for_close))
            thread[len(thread)-1].setName('closing_task')
            thread[0].start()
            xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
            logging.warning('Clicked Closing')
            #self.close()
    def close_now(self):
        """Force-close the dialog immediately from outside the event loop."""
        global done1_1
        logging.warning('Close:8')
        self.params = 888
        self.done=1
        xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
        # Give the background threads a moment to observe params == 888.
        xbmc.sleep(1000)
        logging.warning('Close now Closing')
        done1_1=3
        self.close()
    def onFocus(self, controlId):
        # Focus changes are intentionally ignored.
        pass
class sources_search(xbmcgui.WindowXMLDialog):
    """Full 'searching for sources' progress window (sources_s.xml skin).

    Two daemon-style threads feed the UI: ``background_task`` renders the
    shared scan state from the module-level ``all_s_in`` list, and
    ``get_similer`` cycles TMDb recommendation artwork while the scan runs.
    """
    def __new__(cls, addonID,id,tv_movie,name):
        FILENAME='sources_s.xml'
        return super(sources_search, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID,fan,tv_movie,name):
        # NOTE(review): __new__ names the 2nd arg 'id' but __init__ names it
        # 'fan'; self.id below therefore reads the module-level 'id', not the
        # constructor argument - verify a global 'id' is set by the caller.
        super(sources_search, self).__init__()
        self.onint=False
        # Skin control IDs.
        self.imagecontrol=101
        self.bimagecontrol=5001
        self.txtcontrol=2
        self.close_tsk=0
        self.tv_movie=tv_movie
        self.id=id
        self.name=name
        self.progress=32
        self.progress2=33
        self.label=34
        self.label2=35
        self.label3=36
        self.label4=37
        self.progress3=40
        self.progress4=43
        self.label5=38
        self.label6=41
        self.label7=39
        self.label8=42
        self.label9=44
        self.label10=45
        self.label11=46
        self.label12=47
        self.label13=48
        self.label14=49
        self.label15=50
        self.image_movie=51
        self.label_movie=52
        self.txt_movie=53
        self.label16=54
        self.progress5=55
        self.label17=56
        self.all_ids_done=0
        self.label18=57
        self.label19=58
        self.label20=59
        self.label21=60
        self.progress6=61
        self.label22=62
        self.timer_close=0
        self.all_ids=[]
        xbmc.Player().stop()
        Thread(target=self.background_task).start()
        Thread(target=self.get_similer).start()
    def get_similer(self):
        """Worker: fetch TMDb recommendations for the current title and
        rotate their fan-art/overview on the window every 10 seconds."""
        while self.onint==False:
            xbmc.sleep(100)
        # NOTE(review): 'id' here is the module-level name, not self.id.
        if len(id)>1 and '%' not in id :
            self.getControl(self.label22).setLabel('Getting Similar')
            url=domain_s+'api.themoviedb.org/3/%s/%s/recommendations?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1'%(self.tv_movie,self.id.replace('\n',''))
            self.html=get_html(url)
            logging.warning(url)
            all_data_int=[]
            self.all_ids=[]
            self.all_ids_done=0
            for items in self.html['results']:
                # TMDb uses 'title' for movies and 'name' for TV shows.
                if self.tv_movie=='movie':
                    title=items['title']
                else:
                    title=items['name']
                self.all_ids.append((items['id'],title))
                rating=''
                if items['vote_average']!=None:
                    rating='[COLOR khaki]'+str(items['vote_average'])+'[/COLOR]'
                all_data_int.append((title,items['backdrop_path'],'Rating-(' + rating+')\n'+items['overview']))
            self.all_ids_done=1
            all_was=[]
            while(1):
                # Pick a recommendation not shown yet (give up after 15 shuffles).
                count=0
                while all_data_int[0][1] in all_was:
                    random.shuffle(all_data_int)
                    count+=1
                    if count>15:
                        break
                if all_data_int[0][1]!=None:
                    all_was.append(all_data_int[0][1])
                    self.getControl(self.image_movie).setImage(domain_s+'image.tmdb.org/t/p/original/'+all_data_int[0][1])
                    self.getControl(self.label_movie).setLabel(all_data_int[0][0])
                    self.getControl(self.txt_movie).setText(all_data_int[0][2])
                xbmc.sleep(10000)
                if self.close_tsk>0:
                    break
    def background_task(self):
        """Worker: refresh all progress bars/labels from the shared
        ``all_s_in`` scan state once per second, and close the window when
        the scan finishes (phase 4) or the close countdown expires."""
        global all_s_in
        start_time=time.time()
        # NOTE(review): getSetting returns a string; this truthiness check is
        # always True unless the setting is empty - confirm intent.
        if fav_status=='true' and Addon.getSetting("fav_search_time_en"):
            max_time=int(Addon.getSetting("fav_search_time"))
        else:
            max_time=int(Addon.getSetting("time_s"))
        counter_close=0
        while(1):
            if self.onint:
                try:
                    elapsed_time = time.time() - start_time
                    #self.getControl(self.label17).setLabel('Hellpw')
                    if self.timer_close==1:
                        self.getControl(self.label17).setLabel('Closing Please Wait...')
                    else:
                        self.getControl(self.label17).setLabel(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
                    #prect=int(100*(elapsed_time/max_time))
                    #self.getControl(self.progress6).setPercent(prect)
                    #self.getControl(self.label21).setLabel(str(prect)+'%')
                    count_hebrew=0
                    count_magnet=0
                    count_regular=0
                    if len(all_s_in[0])>0:
                        # Per-provider link counts: green if it found links, red otherwise.
                        txt=[]
                        txt2=[]
                        for names in all_s_in[0]:
                            if names=='magnet':
                                continue
                            if len(all_s_in[0][names]['links'])>0:
                                color='lightgreen'
                                txt.append('[COLOR %s]'%color+names+' - '+str(len(all_s_in[0][names]['links']))+'[/COLOR]')
                            else:
                                color='red'
                                txt2.append('[COLOR %s]'%color+names+' - '+str(len(all_s_in[0][names]['links']))+'[/COLOR]')
                            if 'magnet' in names:
                                count_magnet=count_magnet+len(all_s_in[0][names]['links'])
                            else:
                                count_regular=count_regular+len(all_s_in[0][names]['links'])
                        self.getControl(self.txtcontrol).setText('\n'.join(txt)+'\n'+'\n'.join(txt2))
                    if count_regular>0:
                        self.getControl(self.label18).setLabel(str(count_regular))
                    if count_magnet>0:
                        self.getControl(self.label19).setLabel(str(count_magnet))
                    if count_hebrew>0:
                        self.getControl(self.label20).setLabel(str(count_hebrew))
                    # all_s_in[3] is the scan phase: 1=collect, 2=sources, 3=sort, 4=done.
                    if all_s_in[3]==1:
                        self.getControl(self.progress).setPercent(all_s_in[1])
                        self.getControl(self.label3).setLabel(str(all_s_in[1])+'%')
                        self.getControl(self.label).setLabel('Collecting Files:'+all_s_in[2])
                    elif all_s_in[3]==2:
                        self.getControl(self.progress2).setPercent(all_s_in[1])
                        self.getControl(self.label4).setLabel(str(all_s_in[1])+'%')
                        self.getControl(self.label2).setLabel('Sources: '+all_s_in[2])
                        self.getControl(self.progress).setPercent(100)
                        self.getControl(self.label3).setLabel('100%')
                        self.getControl(self.label).setLabel('Collecting Files: Done')
                    elif all_s_in[3]==3:
                        #self.getControl(self.progress5).setPercent(all_s_in[1])
                        #self.getControl(self.label16).setLabel(str(all_s_in[1])+'%')
                        self.getControl(self.progress2).setPercent(100)
                        self.getControl(self.label4).setLabel('100%')
                        self.getControl(self.label2).setLabel('Sources: Done')
                    if len(all_s_in[4])>0:
                        # Parse the per-resolution summary string into the labels.
                        regex="4K: (.+?) 1080: (.+?) 720: (.+?) 480: (.+?) Rest: (.+?) T: (.+?) '"
                        match=re.compile(regex).findall(all_s_in[4])
                        for res4k,res1080,res720,res480,resuk,total in match:
                            self.getControl(self.label9).setLabel(res4k)
                            self.getControl(self.label10).setLabel(res1080)
                            self.getControl(self.label11).setLabel(res720)
                            self.getControl(self.label12).setLabel(res480)
                            self.getControl(self.label13).setLabel(resuk)
                            # NOTE(review): 'subs' is not set in this scope -
                            # presumably a module-level subtitle count; verify.
                            self.getControl(self.label14).setLabel(subs)
                            self.getControl(self.label15).setLabel(total)
                    # Average CPU usage over up to 8 cores.
                    avg=0
                    counter=0
                    for i in range(0,8):
                        prec=float(xbmc.getInfoLabel('System.CoreUsage(%s)'%str(i)))
                        if prec>0:
                            avg+=int(prec)
                            counter+=1
                    # NOTE(review): ZeroDivisionError if no core reports >0;
                    # swallowed by the outer except, but worth confirming.
                    avg_f=int(float(avg/counter))
                    try:
                        self.getControl(self.progress3).setPercent(int(xbmc.getInfoLabel('System.CpuUsage').replace('%','')))
                    except:
                        self.getControl(self.progress3).setPercent(avg_f)
                    self.getControl(self.label7).setLabel(str(avg_f)+'%')
                    self.getControl(self.progress4).setPercent(int(xbmc.getInfoLabel('System.Memory(used.percent)').replace('%','')))
                    self.getControl(self.label8).setLabel(str(xbmc.getInfoLabel('System.Memory(used.percent)')))
                except Exception as e:
                    import linecache
                    exc_type, exc_obj, tb = sys.exc_info()
                    f = tb.tb_frame
                    lineno = tb.tb_lineno
                    logging.warning('Error in Search S: '+str(e)+' '+str(lineno))
            if self.timer_close==1:
                counter_close+=1
            if all_s_in[3]==4 or counter_close>30:
                # Scan finished or close requested ~30s ago: shut down.
                check=False
                if (self.tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (self.tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
                    check=True
                if Addon.getSetting("video_in_s_wait")=='true' and check:
                    # Let the in-window trailer finish before closing.
                    while(xbmc.Player().isPlaying()):
                        xbmc.sleep(100)
                xbmc.Player().stop()
                self.close_tsk=1
                self.close()
                break
            xbmc.sleep(1000)
            if self.close_tsk>0:
                break
    def onInit(self):
        """Initialise labels and, depending on settings, start a trailer
        (of a recommended title or the current one) inside the window."""
        line = 38
        spacer = 20
        delta = 0
        self.getControl(self.label).setLabel('Collecting Files:')
        self.getControl(self.label2).setLabel('Sources: ')
        self.getControl(self.label5).setLabel('Cpu')
        self.getControl(self.label6).setLabel('Mem ')
        self.setFocus(self.getControl(3002))
        self.onint=True
        logging.warning('Trailer')
        check=False
        if (self.tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (self.tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
            check=True
        if check:
            if Addon.getSetting("video_type_in_s")=='0':
                # NOTE(review): no timeout here - blocks until get_similer
                # finishes its TMDb fetch; confirm that thread always sets
                # all_ids_done.
                while self.all_ids_done==0:
                    xbmc.sleep(100)
                if (len(self.all_ids))>0:
                    random.shuffle(self.all_ids)
                    logging.warning(self.all_ids)
                    id_to_send=self.all_ids[0][0]
                    title_to_send=self.all_ids[0][1]
                else:
                    id_to_send=self.id
                    title_to_send=self.name
            else:
                id_to_send=self.id
                title_to_send=self.name
            try:
                self.getControl(self.label22).setLabel('get link')
                link_m=get_trailer_f(id_to_send,self.tv_movie)
                self.getControl(self.label22).setLabel(title_to_send)
                if link_m!='':
                    try:
                        xbmc.Player().play(link_m, windowed=True)
                    except:
                        pass
            except:
                pass
    def onAction(self, action):
        """Context/back keys request a shutdown via the module-level
        ``stop_window`` flag; background_task performs the actual close."""
        global stop_window
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            self.params = 888
            xbmc.sleep(100)
            stop_window=True
            self.timer_close=1
            xbmc.Player().stop()
            #return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            self.params = 888
            xbmc.sleep(100)
            stop_window=True
            xbmc.Player().stop()
            self.timer_close=1
            #return self.close()
    def onClick(self, controlId):
        """Any click requests shutdown (same flags as onAction)."""
        global stop_window
        stop_window=True
        self.timer_close=1
        #self.close_tsk=1
        xbmc.Player().stop()
        #self.close()
    def onFocus(self, controlId):
        # Focus changes are intentionally ignored.
        pass
def monitor_play():
    """Watch playback while the source scan (shared ``all_s_in``) is still
    running: if playback stops before the scan finishes (phase 4), show a
    progress dialog that mirrors the scan state until it completes or the
    user cancels.

    Bug fix: ``stop_window`` was assigned without being in the ``global``
    statement, so cancelling the dialog set a dead local and the rest of
    the addon never saw the cancellation.  Also removed a stray semicolon
    and normalised the ``xbmcgui . DialogProgress ( )`` spacing.
    """
    global stoped_play_once,all_s_in,once_fast_play,stop_window
    logging.warning('In Monitor Play')
    once=0
    while(1):
        if all_s_in[3]!=4:
            if not xbmc.Player().isPlaying():
                if once==0:
                    # First time playback is found stopped: open the dialog.
                    xbmc.executebuiltin("Dialog.Open(busydialog)")
                    logging.warning('Stop Super')
                    xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', ' Will Always Show Sources'.decode('utf8'))).encode('utf-8'))
                    dp = xbmcgui.DialogProgress()
                    dp.create('Please Wait','Searching...', '','')
                    dp.update(0, 'Please Wait','Searching...', '' )
                    once=1
                dp.update(all_s_in[1], 'Please Wait',all_s_in[2], all_s_in[4] )
                if dp.iscanceled():
                    # Propagate the cancellation to the module-level flags.
                    stop_window=True
                    once_fast_play=0
                    stoped_play_once=1
        else:
            # Scan reached phase 4 (done) - stop monitoring.
            break
        xbmc.sleep(100)
    if once==1:
        dp.close()
        xbmc.executebuiltin("Dialog.Close(busydialog)")
class sources_search2(xbmcgui.WindowXMLDialog):
    """Compact 'searching for sources' window (sources_s2.xml skin).

    ``background_task`` fetches TMDb metadata/artwork for the title (or a
    random recommendation when type == 'find_similar'); ``get_similer``
    refreshes progress labels from the shared ``all_s_in`` scan state and
    decides when to close the window.
    """
    def __new__(cls, addonID,id,tv_movie,type):
        FILENAME='sources_s2.xml'
        return super(sources_search2, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID,id,tv_movie,type):
        super(sources_search2, self).__init__()
        self.full=0
        self.onint=False
        # Skin control IDs.
        self.poster=1
        self.timer_close=0
        self.changed_poster=2
        self.all_ids=[]
        self.all_ids_done=0
        self.close_tsk=0
        self.type=type
        self.titlein=4
        self.titlein2=5
        self.txt_movie=6
        self.genere=7
        self.progress=8
        self.labelpre=9
        self.labelResult=10
        self.timelabel=11
        self.recomlabel=13
        self.labelstatus=14
        xbmc.Player().stop()
        self.id=id
        self.st_init=0
        self.tv_movie=tv_movie
        thread=[]
        thread.append(Thread(self.background_task))
        thread[len(thread)-1].setName('background_task')
        thread.append(Thread(self.get_similer))
        thread[len(thread)-1].setName('get_similer')
        for td in thread:
            td.start()
        #Thread(target=self.background_task).start()
        #Thread(target=self.get_similer).start()
    def get_similer(self):
        """Worker: drive the progress UI (percent, result label, running
        scraper threads, elapsed time) and close the window when the scan
        finishes, errors out, or a fast-play stream actually starts."""
        global all_s_in,global_result,stop_window,once_fast_play,close_sources_now
        while self.st_init==0:
            xbmc.sleep(100)
        logging.warning('Start Similar')
        start_time=time.time()
        counter_close=0
        tick=0
        tick_global=0
        while(1):
            if once_fast_play==1:
                # Fast-play mode: once real playback passes 2s, hand over to
                # monitor_play and close this window.
                if xbmc.Player().isPlaying():
                    vidtime = xbmc.Player().getTime()
                    if vidtime > 2:
                        xbmc.executebuiltin("Dialog.Close(busydialog)")
                        logging.warning('Start Monitor Thread')
                        thread=[]
                        thread.append(Thread(monitor_play))
                        thread[len(thread)-1].setName('monitor_play')
                        thread[0].start()
                        self.close_tsk=1
                        self.close()
            if self.timer_close==1:
                # Blink the 'Closing' label once per loop iteration.
                self.getControl(self.genere).setLabel('Closing Please Wait...'+str(stop_window))
                counter_close+=1
                if tick==0:
                    self.getControl(self.genere).setVisible(True)
                    tick=1
                else:
                    self.getControl(self.genere).setVisible(False)
                    tick=0
            try:
                self.getControl(self.labelpre).setLabel(str(all_s_in[1])+'% '+str(all_s_in[3])+'/4')
                if 'Playing' in global_result:
                    # Blink the result label while something is playing.
                    if tick_global==1:
                        tick_global=0
                        self.getControl(self.labelResult).setLabel(global_result)
                    else:
                        tick_global=1
                        self.getControl(self.labelResult).setLabel('')
                else:
                    self.getControl(self.labelResult).setLabel(global_result)
                self.getControl(self.progress).setPercent(all_s_in[1])
                # List the scraper threads that are still running.
                all_t=[]
                for thread in threading.enumerate():
                    if ('background_task' in thread.getName()) or ('get_similer' in thread.getName()) or ('MainThread' in thread.getName()) or ('sources_s' in thread.getName()):
                        continue
                    if (thread.isAlive()):
                        all_t.append( thread.getName())
                if len(all_t)>10:
                    tt=' Remaining Sources:'+str(len(all_t))
                else:
                    tt=','.join(all_t)
                self.getControl(self.labelstatus).setLabel(tt)
                elapsed_time = time.time() - start_time
                self.getControl(self.timelabel).setLabel(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
                if self.close_tsk==1:
                    break
            except Exception as e:
                logging.warning('Error In Skin:'+str(e))
            counter_now=False
            check=False
            if (self.tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (self.tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
                check=True
            if Addon.getSetting("video_in_s_wait")=='true' and check and Addon.getSetting("super_fast")=='false':
                if not xbmc.Player().isPlaying() and counter_close>30:
                    counter_now=True
            elif counter_close>30:
                counter_now=True
            if all_s_in[3]==4 or counter_now or close_on_error==1 or close_sources_now==1:
                # Scan done / timed-out close / error / external close request.
                if Addon.getSetting("video_in_s_wait")=='true' and check and Addon.getSetting("super_fast")=='false':
                    logging.warning('Closing:'+str(xbmc.Player().isPlaying()))
                    self.getControl(self.labelstatus).setLabel(' Will Show Trailer')
                    while(xbmc.Player().isPlaying()):
                        xbmc.sleep(100)
                logging.warning('once_fast_play22: '+str(once_fast_play))
                if once_fast_play==0 and close_sources_now==0:
                    xbmc.Player().stop()
                self.close_tsk=1
                stop_window=True
                self.close()
                break
            xbmc.sleep(500)
        return 0
    def background_task(self):
        """Worker: resolve the title to show (the item itself, or a random
        TMDb recommendation for 'find_similar'), fill in poster/plot/genre
        labels, and rotate backdrop images until the window closes."""
        global close_on_error
        xbmc.Player().stop()
        if self.type=='find_similar' :
            # Replace self.id with a random recommended title's id.
            url=domain_s+'api.themoviedb.org/3/%s/%s/recommendations?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1'%(self.tv_movie,self.id.replace('\n',''))
            self.html=get_html(url)
            logging.warning(url)
            all_data_int=[]
            self.all_ids=[]
            self.all_ids_done=0
            for items in self.html['results']:
                all_data_int.append(items['id'])
            random.shuffle(all_data_int)
            self.id=all_data_int[0]
        elif Addon.getSetting("video_type_in_s")=='0':
            # Collect recommendation ids for the trailer picker in onInit.
            url=domain_s+'api.themoviedb.org/3/%s/%s/recommendations?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1'%(self.tv_movie,self.id.replace('\n',''))
            self.html=get_html(url)
            self.all_ids=[]
            self.all_ids_done=0
            for items in self.html['results']:
                # TMDb uses 'title' for movies and 'name' for TV shows.
                if self.tv_movie=='movie':
                    title=items['title']
                else:
                    title=items['name']
                self.all_ids.append((items['id'],title))
            self.all_ids_done=1
        check=False
        if (self.tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (self.tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
            check=True
        if self.type=='find_similar' and check:
            link_m=get_trailer_f(self.id,self.tv_movie)
            if link_m!='':
                try:
                    xbmc.Player().play(link_m, windowed=True)
                except:
                    pass
        url='https://api.themoviedb.org/3/%s/%s?api_key=1248868d7003f60f2386595db98455ef&language=en&include_image_language=ru,null&append_to_response=images'%(self.tv_movie,self.id)
        self.html=requests.get(url).json()
        while self.st_init==0:
            xbmc.sleep(100)
        all_img=[]
        for items in self.html['images']['backdrops']:
            all_img.append(domain_s+'image.tmdb.org/t/p/original/'+items['file_path'])
            self.getControl(self.changed_poster).setImage(domain_s+'image.tmdb.org/t/p/original/'+items['file_path'])
        random.shuffle(all_img)
        genres_list=[]
        genere=''
        if 'genres' in self.html:
            for g in self.html['genres']:
                genres_list.append(g['name'])
        try:genere = u' / '.join(genres_list)
        except:genere=''
        # NOTE(review): backdrop_path can be None in TMDb responses, which
        # would raise TypeError on concatenation here - verify upstream.
        fan=domain_s+'image.tmdb.org/t/p/original/'+self.html['backdrop_path']
        self.getControl(self.poster).setImage(fan)
        if 'title' in self.html:
            title_n=self.html['title']
        else:
            title_n=self.html['name']
        self.getControl(self.titlein).setLabel('[B]'+title_n+'[/B]')
        if 'tagline' in self.html:
            tag=self.html['tagline']
        else:
            tag=self.html['status']
        self.getControl(self.titlein2).setLabel('[I]'+tag+'[/I]')
        self.getControl(self.genere).setLabel(genere)
        self.getControl(self.txt_movie).setText(self.html['overview'])
        if self.type=='find_similar':
            self.getControl(self.recomlabel).setLabel('[B][I]Recommended for Next Time..[/I][/B]')
        while(1):
            # Rotate backdrops every 10s.
            # NOTE(review): the break below only exits the inner for-loop,
            # so this thread never terminates even after close - verify.
            for items in all_img:
                self.getControl(self.changed_poster).setImage(items)
                xbmc.sleep(10000)
                if self.close_tsk==1 or close_on_error==1:
                    break
            xbmc.sleep(100)
        return 0
    def onInit(self):
        """Release the worker threads (st_init) and optionally start a
        trailer for the current or a recommended title."""
        self.st_init=1
        self.setFocus(self.getControl(3002))
        check=False
        if (self.tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (self.tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
            check=True
        if self.type!='find_similar' and check:
            if Addon.getSetting("video_type_in_s")=='0':
                logging.warning('self.all_ids_done')
                # Wait up to ~10s for background_task to fetch recommendations.
                counter=0
                while self.all_ids_done==0:
                    counter+=1
                    xbmc.sleep(100)
                    if counter>100:
                        break
                logging.warning('Done self.all_ids_done')
                if (len(self.all_ids))>0:
                    random.shuffle(self.all_ids)
                    logging.warning('self.all_ids')
                    logging.warning(self.all_ids)
                    id_to_send=self.all_ids[0][0]
                    title_to_send=self.all_ids[0][1]
                else:
                    id_to_send=self.id
            else:
                id_to_send=self.id
            link_m=get_trailer_f(id_to_send,self.tv_movie)
            if link_m!='':
                try:
                    xbmc.Player().play(link_m, windowed=True)
                except:
                    pass
        #self.getControl(self.title).setLabel(self.html['original_title'])
    def onAction(self, action):
        """Context/back keys request shutdown via module flags; the
        get_similer worker performs the actual close."""
        global stop_window,once_fast_play
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            self.params = 888
            xbmc.sleep(100)
            stop_window=True
            #self.close_tsk=1
            self.timer_close=1
            if once_fast_play==0:
                xbmc.Player().stop()
            #return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            self.params = 888
            xbmc.sleep(100)
            stop_window=True
            #self.close_tsk=1
            self.timer_close=1
            if once_fast_play==0:
                xbmc.Player().stop()
            #return self.close()
    def onClick(self, controlId):
        """Any click requests shutdown (same flags as onAction)."""
        global stop_window,once_fast_play
        stop_window=True
        #self.close_tsk=1
        self.timer_close=1
        if once_fast_play==0:
            xbmc.Player().stop()
        #self.close()
    def onFocus(self, controlId):
        # Focus changes are intentionally ignored.
        pass
def trd_sendy(headers,data):
    """Thread worker: POST an anonymous e-mail via anonymouse.org.

    Retries while the service answers with its '(flood-protection)' page,
    waiting out the 60-second rate limit between attempts.

    Fix: the original slept 61 seconds unconditionally after every POST,
    including the final successful one; the sleep now only happens when
    the flood-protection page was actually returned.

    headers -- HTTP headers for the POST request.
    data    -- form fields ('to', 'subject', 'text'); see sendy().
    """
    response='(flood-protection)'
    logging.warning('Trying to Send')
    while '(flood-protection)' in response:
        response = requests.post('http://anonymouse.org/cgi-bin/anon-email.cgi', headers=headers, data=data).content
        if '(flood-protection)' in response:
            # Rate-limited: wait out the service's 60s window and retry.
            time.sleep(61)
    logging.warning('Send Succesful')
def sendy(msg,header,type):
    """Send an anonymous e-mail report (msg/header) via anonymouse.org.

    DISABLED: the unconditional ``return 0`` below short-circuits the whole
    function; everything after it is dead code kept for reference.
    """
    return 0
    import requests
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:66.0) Gecko/20100101 Firefox/66.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': 'http://anonymouse.org/cgi-bin/anon-email.cgi',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    # Recipient local-part comes from 'type'; domain is base64-obfuscated.
    data = {
        'to':type+ 'QG1haWxkdS5kZQ=='.decode('base64'),
        'subject': header,
        'text': msg
    }
    # Fire-and-forget: the actual POST happens in a worker thread.
    thread=[]
    thread.append(Thread(trd_sendy,headers,data))
    thread[0].start()
    return 'ok'
class wizard(xbmcgui.WindowXMLDialog):
    """Auto-play wizard window (wizard.xml skin).

    Tries to play ``fast_link`` from ``list_of_play`` immediately; on
    failure it walks down the remaining links until one plays.  The list
    is also shown so the user can pick a different link manually.
    """
    def __new__(cls, addonID,list_of_play,fast_link):
        FILENAME='wizard.xml'
        return super(wizard, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID,list_of_play,fast_link):
        super(wizard, self).__init__()
        self.list=list_of_play
        self.selected_link=fast_link
        self.error=False
        Thread(target=self.background_task).start()
    def background_task(self):
        """Worker: refresh a small status label (playing / busy / elapsed
        seconds / link error) 5 times per second."""
        # NOTE(review): while(1) has no break, so the 'return 0' below is
        # unreachable and this thread never terminates - verify intended.
        while(1):
            txt=[]
            if xbmc.Player().isPlaying():
                txt.append('Playing')
                self.getControl(3).setVisible(True)
            else:
                self.getControl(3).setVisible(False)
            if xbmc.getCondVisibility('Window.IsActive(busydialog)'):
                txt.append('Busy')
                self.getControl(4).setVisible(True)
            else:
                self.getControl(4).setVisible(False)
            if xbmc.Player().isPlaying():
                vidtime = xbmc.Player().getTime()
                if vidtime > 0:
                    txt.append(str(int(vidtime)))
            if self.error==True:
                txt=['Error in Link']
            xbmc.sleep(200)
            try:
                self.getControl(2).setLabel(','.join(txt))
            except Exception as e:
                logging.warning('Skin Error:'+str(e))
                pass
        return 0
    def onInit(self):
        """Fill the link list, then play the pre-selected link, falling
        through to the next links on playback errors."""
        line = 38
        spacer = 20
        delta = 0
        self.getControl(3).setVisible(False)
        self.getControl(4).setVisible(False)
        self.listin = self.getControl(3000)
        selected_one=[]
        self.all_ones=[]
        index=0
        for name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server in self.list:
            self.all_ones.append((name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server))
            if link==self.selected_link:
                color='red'
                selected_one.append((name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server))
                selected_index=index
            else:
                color='lightgreen'
            # NOTE(review): 'q' is not defined in this scope - presumably the
            # module-level quality string; verify.
            title2='[COLOR %s]◄'%color+q+'►'+server+'[/COLOR]'
            liz = xbmcgui.ListItem(title2)
            self.listin.addItem(liz)
            index+=1
        # NOTE(review): if fast_link never matches, selected_one is empty and
        # selected_index unbound -> IndexError/NameError here; verify callers.
        name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server=selected_one[0]
        self.error=False
        try:
            self.getControl(1).setLabel('[COLOR %s]◄'%color+q+'►'+server+'[/COLOR]')
            play(name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,wizard_play=True)
        except:
            self.error=True
            selected_index+=1
        # Fall through the remaining links until one plays.
        while self.error:
            name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server=self.all_ones[selected_index]
            try:
                self.getControl(1).setLabel('[COLOR %s]◄'%color+q+'►'+server+'[/COLOR]')
                play(name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,wizard_play=True)
                self.error=False
            except:
                self.error=True
                selected_index+=1
    def onAction(self, action):
        """Context/back keys close the wizard."""
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            return self.close()
    def onClick(self, controlId):
        """Clicking a list row plays that link; the cancel button (3001)
        closes the window."""
        if controlId != 3001:
            xbmc.Player().stop()
            index = self.listin.getSelectedPosition()
            name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,server=self.all_ones[index]
            self.error=False
            try:
                play(name,link,icon,image,plot,year,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,wizard_play=True)
            except:
                self.error=True
        else:
            self.close()
    def onFocus(self, controlId):
        # Focus changes are intentionally ignored.
        pass
# Disabled usage example for the wizard dialog (kept for reference).
'''
list=[]
list.append(('1080','Rapidvideo','Rampge.1080','www'))
list.append(('1080','Vummo','Rampge.1080.ll','www'))
list.append(('1080','Sertil','Rampge.1080','www'))
list.append(('1080','Rapidvideo','Rampge.1080','www'))
list.append(('1080','Vummo','Rampge.1080.ll','www'))
list.append(('1080','Sertil','Rampge.1080','www'))
list.append(('1080','Rapidvideo','Rampge.1080','www'))
list.append(('1080','Vummo','Rampge.1080.ll','www'))
list.append(('1080','Sertil','Rampge.1080','www'))
list.append(('1080','Rapidvideo','Rampge.1080','www'))
list.append(('1080','Vummo','Rampge.1080.ll','www'))
list.append(('1080','Sertil','Rampge.1080','www'))
menu = wizard('plugin.video.destinyds', list)
menu.doModal()
del menu
'''
class run_link(xbmcgui.WindowXMLDialog):
    """Simple 'resolving link' window (run.xml skin) that shows elapsed
    time plus live CPU/memory usage while a link is being opened."""
    def __new__(cls, addonID):
        FILENAME='run.xml'
        return super(run_link, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
    def __init__(self, addonID):
        super(run_link, self).__init__()
        # NOTE(review): 'menu' is the module-level demo list defined after
        # this class; resolved at call time, so import order is fine.
        self.menu = menu
        # Skin control IDs.
        self.imagecontrol=101
        self.bimagecontrol=5001
        self.txtcontrol=2
        self.close_tsk=0
        self.progress=32
        self.progress2=33
        self.label=34
        self.label2=35
        self.label3=36
        self.label4=37
        self.progress3=40
        self.progress4=43
        self.label5=38
        self.label6=41
        self.label7=39
        self.label8=42
        self.label9=44
        self.label10=45
        self.label11=46
        self.label12=47
        self.label13=48
        self.label14=49
        self.label15=50
        self.image_movie=51
        self.label_movie=52
        self.txt_movie=53
        self.label16=54
        self.progress5=55
        self.label17=56
        self.label18=57
        self.label19=58
        self.label20=59
        self.label21=60
        self.progress6=61
        Thread(target=self.background_task).start()
    def background_task(self):
        """Worker: refresh elapsed time and CPU/memory gauges once per
        second until close_tsk is set by onClick."""
        global all_s_in
        start_time=time.time()
        while(1):
            try:
                elapsed_time = time.time() - start_time
                #self.getControl(self.label17).setLabel('Hellpw')
                self.getControl(400).setLabel(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)))
                #prect=int(100*(elapsed_time/max_time))
                #self.getControl(self.progress6).setPercent(prect)
                #self.getControl(self.label21).setLabel(str(prect)+'%')
                self.getControl(self.progress3).setPercent(int(xbmc.getInfoLabel('System.CpuUsage').replace('%','')))
                self.getControl(self.label7).setLabel(str(xbmc.getInfoLabel('System.CpuUsage')))
                self.getControl(self.progress4).setPercent(int(xbmc.getInfoLabel('System.Memory(used.percent)').replace('%','')))
                self.getControl(self.label8).setLabel(str(xbmc.getInfoLabel('System.Memory(used.percent)')))
                xbmc.sleep(1000)
                if self.close_tsk>0:
                    break
            except Exception as e:
                logging.warning('Skin Error:'+str(e))
                pass
    def onInit(self):
        line = 38
        spacer = 20
        delta = 0
        self.getControl(self.label5).setLabel('Cpu')
        self.getControl(self.label6).setLabel('Mem ')
        self.setFocus(self.getControl(3002))
    def onAction(self, action):
        """Context/back keys close the window (context also sets the
        module-level stop_window flag)."""
        global stop_window
        actionId = action.getId()
        if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
            self.params = 888
            xbmc.sleep(100)
            stop_window=True
            return self.close()
        if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
            self.params = 888
            return self.close()
    def onClick(self, controlId):
        """Any click stops the worker thread and closes the window."""
        global stop_window
        stop_window=True
        self.close_tsk=1
        self.close()
    def onFocus(self, controlId):
        # Focus changes are intentionally ignored.
        pass
# Module-level demo/default values.  NOTE(review): some of these names
# ('menu', 'q') are read by the dialog classes above (run_link.__init__,
# wizard.onInit) - changing them changes those dialogs' behaviour.
menu=[]
name='Back to the future'
server='magnet_api'
pre_n=80
q='1080'
supplay='Google'
size='1.2 G'
link='www.demo.com'
# Disabled usage example for the ContextMenu dialog (kept for reference).
'''
menu.append([name.replace('-',' ').replace('%20',' ').strip(), server,str(pre_n),q,supplay,size,link])
menu.append([name.replace('-',' ').replace('%20',' ').strip(), server,str(pre_n),q,supplay,size,link])
menu = ContextMenu('plugin.video.destinyds', menu)
menu.doModal()
param = menu.params
del menu
'''
def get_html(url):
    """GET *url* with browser-like headers.

    Returns the parsed JSON object when the response body is valid JSON,
    otherwise the raw response bytes.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    response = requests.get(url, headers=request_headers)
    body = response.content
    try:
        return json.loads(body)
    except:
        # Not JSON - hand back the raw body unchanged.
        return body
class Chose_ep(xbmcgui.WindowXMLDialog):
def __new__(cls, addonID, heb_name,name, id,season,episode,dates,original_title):
FILENAME='chose_ep.xml'
return super(Chose_ep, cls).__new__(cls, FILENAME,Addon.getAddonInfo('path'), 'DefaultSkin')
def __init__(self, addonID,heb_name,name, id,season,episode,dates,original_title):
super(Chose_ep, self).__init__()
self.menu = menu
self.labelcontrol1=1020
self.labelcontrol2=1021
self.imagecontrol=101
self.bimagecontrol=5001
self.txtcontrol=2
self.season=season
self.original_title=original_title
self.id=id
self.episode=episode
self.heb_name=heb_name
self.name=name
self.dates=dates
self.imagess=[]
self.plotss=[]
self.labelss=[]
self.labelss1=[]
    def onInit(self):
        """Build the episode chooser list.

        Rows (depending on which air dates exist in self.dates):
        next episode / current episode / previous episode, then
        "Open Season's Episodes" and "Open Season Selection".
        Episode titles are tinted magenta when an AllData row shows they
        were already watched. Finally primes the art/label controls with
        row 0's data.
        """
        # Season metadata from TMDb, cached for 24 hours.
        url='https://api.themoviedb.org/3/tv/%s/season/%s?api_key=1248868d7003f60f2386595db98455ef&language=en'%(self.id,self.season)
        html=cache.get(get_html,24,url, table='posters')
        try:
            maste_image=domain_s+'image.tmdb.org/t/p/original/'+html['poster_path']
        except:
            # No season poster available (poster_path missing or None).
            maste_image=''
        master_plot=html['overview']
        master_name=html['name']
        # Watched-state lookups: a matching AllData row means the episode was
        # played before -> magenta title instead of white.
        dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' AND type='%s' AND season='%s' AND episode = '%s'"%(self.original_title.replace("'","%27"),'tv',self.season,str(int(self.episode)+1)))
        match = dbcur.fetchone()
        color_next='white'
        if match!=None:
            color_next='magenta'
        dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' AND type='%s' AND season='%s' AND episode = '%s'"%(self.original_title.replace("'","%27"),'tv',self.season,str(int(self.episode))))
        match = dbcur.fetchone()
        color_current='white'
        if match!=None:
            color_current='magenta'
        dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' AND type='%s' AND season='%s' AND episode = '%s'"%(self.original_title.replace("'","%27"),'tv',self.season,str(int(self.episode)-1)))
        match = dbcur.fetchone()
        color_prev='white'
        if match!=None:
            color_prev='magenta'
        # Resize the backdrop (5001) and the list (3000) to fill the skin.
        height=1100
        self.getControl(5001).setHeight(height)
        self.list = self.getControl(3000)
        self.list.setHeight(height)
        # NOTE(review): newY is computed but never used.
        newY = 360 - (height/2)
        self.getControl(5000).setPosition(self.getControl(5000).getX(), 0)
        self.params = None
        self.paramList = []
        #txt='[COLOR lightseagreen]'+name.replace('-',' ').replace('%20',' ').strip()+'[/COLOR]\nServer: '+server+' Subs: '+str(pre_n)+' Quality:[COLOR gold] ◄'+q+'► [/COLOR]Provider: [COLOR lightblue]'+supplay+'[/COLOR] Size:[COLOR coral]'+size+'[/COLOR]$$$$$$$'+link
        #import textwrap
        # self.dates is a urlencoded JSON list [prev_date, current_date,
        # next_date]; a 0 entry means that episode does not exist.
        all_d=json.loads(urllib.unquote_plus(self.dates))
        if len(all_d)<2:
            all_d=['','','']
        if all_d[0]==0:
            # No previous episode: rows are next / current / episodes / seasons.
            #next ep
            if len(html['episodes'])>int(self.episode):
                items=html['episodes'][int(self.episode)]
                title='[COLOR %s]'%color_next+items['name']+'[/COLOR]'
                plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
                image=maste_image
                if items['still_path']!=None:
                    image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
                self.imagess.append(image)
                title=title+ '- Episode '+str(int(self.episode)+1)
                self.labelss.append(title)
                liz = xbmcgui.ListItem(title)
                liz.setProperty('title_type', 'Play the Next Episode - '+all_d[2])
                self.labelss1.append('Play the Next Episode - '+all_d[2])
                liz.setProperty('image', image)
                liz.setProperty('description',plot)
                self.plotss.append(plot)
                # The arrow glyph in self.name marks this row for pre-selection.
                if '◄' in self.name:
                    liz.setProperty('pre', '100')
                self.list.addItem(liz)
            else:
                # TMDb has no metadata for the next episode yet - bare row.
                liz = xbmcgui.ListItem(' Episode '+str(int(self.episode)+1))
                liz.setProperty('title_type', 'Play the Next Episode - '+all_d[2])
                self.labelss1.append('Play the Next Episode - '+all_d[2])
                liz.setProperty('image', '')
                liz.setProperty('description','')
                self.plotss.append('')
                self.list.addItem(liz)
            #current ep
            items=html['episodes'][int(self.episode)-1]
            title='[COLOR %s]'%color_current+items['name']+'[/COLOR]'
            plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
            image=maste_image
            if items['still_path']!=None:
                image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
            self.imagess.append(image)
            title=title+ '- Episode '+self.episode
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Play Current Episode - '+all_d[1])
            self.labelss1.append('Play Current Episode - '+all_d[1])
            liz.setProperty('image', image)
            liz.setProperty('description',plot)
            self.plotss.append(plot)
            if '▲' in self.name:
                liz.setProperty('pre', '100')
            self.list.addItem(liz)
            #episodes
            title=master_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', "Open Season's Episodes")
            self.labelss1.append("Open Season's Episodes")
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
            #season ep
            title=self.heb_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Open Season Selection')
            self.labelss1.append('Open Season Selection')
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
            #choise=['Play Next Episode - '+all_d[2],'Play Current Episode - '+all_d[1],'Open Season Episodes','Open Season Selection']
        elif all_d[2]==0:
            # No next episode: rows are current / previous / episodes / seasons.
            #current ep
            items=html['episodes'][int(self.episode)-1]
            title='[COLOR %s]'%color_current+items['name']+'[/COLOR]'
            plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
            image=maste_image
            if items['still_path']!=None:
                image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
            self.imagess.append(image)
            title=title+ 'Episode - '+self.episode
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Play Current Episode - '+all_d[1])
            self.labelss1.append('Play Current Episode - '+all_d[1])
            liz.setProperty('image', image)
            liz.setProperty('description',plot)
            self.plotss.append(plot)
            if '▲' in self.name:
                liz.setProperty('pre', '100')
            self.list.addItem(liz)
            #prev ep
            items=html['episodes'][int(self.episode)-2]
            title='[COLOR %s]'%color_prev+items['name']+'[/COLOR]'
            plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
            image=maste_image
            if items['still_path']!=None:
                image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
            self.imagess.append(image)
            title=title+ '- Episode '+str(int(self.episode)-1)
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Play Previous Episode - '+all_d[0])
            self.labelss1.append( 'Play Previous Episode - '+all_d[0])
            liz.setProperty('image', image)
            liz.setProperty('description',plot)
            self.plotss.append(plot)
            self.list.addItem(liz)
            #episodes
            title=master_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', "Open Season's Episodes")
            self.labelss1.append("Open Season's Episodes")
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
            #season ep
            title=self.heb_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Open Season Selection')
            self.labelss1.append('Open Season Selection')
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
            #choise=['Play Current Episode - '+all_d[1],'Play Previous Episode - '+all_d[0],'Open Season Episodes','Open Season Selection']
        else:
            # Both neighbours exist: rows are next / current / previous /
            # episodes / seasons.
            #next ep
            if len(html['episodes'])>int(self.episode):
                items=html['episodes'][int(self.episode)]
                title='[COLOR %s]'%color_next+items['name']+'[/COLOR]'
                plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
                image=maste_image
                if items['still_path']!=None:
                    image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
                self.imagess.append(image)
                title=title+ '- Episode '+str(int(self.episode)+1)
                self.labelss.append(title)
                liz = xbmcgui.ListItem(title)
                # The air-date string may already carry magenta markup.
                if 'magenta' not in all_d[2]:
                    liz.setProperty('title_type', 'Play Next Episode - '+all_d[2])
                    self.labelss1.append('Play Next Episode - '+all_d[2])
                else:
                    liz.setProperty('title_type', '[COLOR magenta]'+'Play Next Episode - '+'[/COLOR]'+all_d[2])
                    self.labelss1.append('[COLOR magenta]'+'Play Next Episode - '+'[/COLOR]'+all_d[2])
                liz.setProperty('image', image)
                liz.setProperty('description',plot)
                self.plotss.append(plot)
                if '◄' in self.name:
                    liz.setProperty('pre', '100')
                self.list.addItem(liz)
            else:
                liz = xbmcgui.ListItem(' Episode '+str(int(self.episode)+1))
                liz.setProperty('title_type', 'Play Next Episode - '+all_d[2])
                self.labelss1.append('Play Next Episode - '+all_d[2])
                liz.setProperty('image', '')
                liz.setProperty('description','')
                self.plotss.append('')
                self.list.addItem(liz)
            #current ep
            if len(html['episodes'])>(int(self.episode)-1):
                items=html['episodes'][int(self.episode)-1]
                title='[COLOR %s]'%color_current+items['name']+'[/COLOR]'
                plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
                image=maste_image
                if items['still_path']!=None:
                    image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
                self.imagess.append(image)
                title=title+ '- Episode '+str(int(self.episode))
            else:
                title='- Episode '+self.episode
                plot=''
                image=maste_image
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Play Current Episode - '+all_d[1])
            self.labelss1.append('Play Current Episode - '+all_d[1])
            liz.setProperty('image', image)
            liz.setProperty('description',plot)
            self.plotss.append(plot)
            if '▲' in self.name:
                liz.setProperty('pre', '100')
            self.list.addItem(liz)
            #prev ep
            if len(html['episodes'])>(int(self.episode)-2):
                items=html['episodes'][int(self.episode)-2]
                title='[COLOR %s]'%color_prev+items['name']+'[/COLOR]'
                plot='[COLOR khaki]'+items['overview']+'[/COLOR]'
                image=maste_image
                if items['still_path']!=None:
                    image=domain_s+'image.tmdb.org/t/p/original/'+items['still_path']
                self.imagess.append(image)
                title=title+ '- Episode '+str(int(self.episode)-1)
                self.labelss.append(title)
            else:
                # NOTE(review): unlike the branch above, this fallback never
                # appends to self.labelss / self.imagess, so the parallel
                # lists can drift out of step with the list rows here.
                title='- Episode '+str(int(self.episode)-1)
                plot=''
                image=maste_image
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Play Previous Episode - '+all_d[0])
            self.labelss1.append('Play Previous Episode - '+all_d[0])
            liz.setProperty('image', image)
            liz.setProperty('description',plot)
            self.plotss.append(plot)
            self.list.addItem(liz)
            #episodes
            title=master_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', "Open Season's Episodes")
            self.labelss1.append("Open Season's Episodes")
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
            #season ep
            title=self.heb_name
            self.labelss.append(title)
            liz = xbmcgui.ListItem(title)
            liz.setProperty('title_type', 'Open Season Selection')
            self.labelss1.append('Open Season Selection')
            liz.setProperty('image', maste_image)
            self.imagess.append(maste_image)
            liz.setProperty('description',master_plot)
            self.plotss.append(master_plot)
            self.list.addItem(liz)
        # Prime the preview controls with the first row's data.
        self.setFocus(self.list)
        self.getControl(self.imagecontrol).setImage(self.imagess[0])
        self.getControl(self.bimagecontrol).setImage(maste_image)
        self.getControl(self.txtcontrol).setText(self.plotss[0])
        self.getControl(self.labelcontrol1).setLabel (self.labelss1[0])
        self.getControl(self.labelcontrol2).setLabel (self.labelss[0])
def onAction(self, action):
actionId = action.getId()
try:
self.getControl(self.imagecontrol).setImage(self.imagess[self.list.getSelectedPosition()])
self.getControl(self.txtcontrol).setText(self.plotss[self.list.getSelectedPosition()])
self.getControl(self.labelcontrol1).setLabel (self.labelss1[self.list.getSelectedPosition()])
self.getControl(self.labelcontrol2).setLabel (self.labelss[self.list.getSelectedPosition()])
except:
pass
if actionId in [ACTION_CONTEXT_MENU, ACTION_C_KEY]:
self.params = -1
xbmc.sleep(100)
return self.close()
if actionId in [ACTION_PARENT_DIR, ACTION_PREVIOUS_MENU, ACTION_BACK]:
self.params = -1
return self.close()
def onClick(self, controlId):
if controlId != 3001:
index = self.list.getSelectedPosition()
#self.getControl(self.txtcontrol).setText(self.plotss[index])
try: self.params = index
except: self.params = None
self.close()
    def onFocus(self, controlId):
        # No focus handling required; row tracking happens in onAction().
        pass
def unzip(file, path):
    """Extract every member of the zip archive *file* into directory *path*.

    file -- path to the .zip archive
    path -- destination directory (created by extractall as needed)
    """
    from zfile import ZipFile  # project-local ZipFile implementation
    # Use a context manager so the archive handle is always closed; the
    # original opened the archive twice and never closed the first handle.
    with ZipFile(file, 'r') as archive:
        archive.extractall(path)
### fix_setting: one-time per-version housekeeping ###
def fix_setting(force=False):
    """Run one-time per-version housekeeping.

    When the stored version marker differs from the installed add-on version
    (or *force* is True): show the changelog, report the update via sendy(),
    rewrite the version marker and clear all caches.

    force -- run the routine even if the version marker already matches.
    """
    version = Addon.getAddonInfo('version')
    version_path = os.path.join(user_dataDir, 'version.txt')
    needs_update = force
    if not os.path.exists(version_path):
        needs_update = True
    else:
        # The marker file holds just the version string (no newline), so a
        # membership test against readlines() finds it when unchanged.
        with open(version_path, 'r') as marker:
            if version not in marker.readlines():
                needs_update = True
    if needs_update:
        current_folder = os.path.dirname(os.path.realpath(__file__))
        with open(os.path.join(current_folder, 'changelog.txt'), 'r') as changelog:
            news = changelog.read()
        # Telemetry ping + changelog popup for the user.
        sendy('Updated to Version:'+version,'Update Destiny of Deathstar','Destiny of Deathstar Update')
        contact(title='Welcome to Version -'+version ,msg=news)
        with open(version_path, 'w') as marker:
            marker.write(version)
        ClearCache()
def update_providers():
    """Rescan the three provider directories (mag_dir, rd_dir, done_dir) and
    rewrite the matching <!-- ... servers --> sections of settings.xml so
    each provider module gets an on/off toggle, tagged (TV)/(Movies) from
    get_type(). Shows a DialogProgress while scanning.
    """
    if 1:
        dp = xbmcgui . DialogProgress ( )
        dp.create('Please Wait','Searching Sources', '','')
        dp.update(0, 'Please Wait','Searching Sources', '' )
        count=0
        #copyfile(os.path.join(addonPath, 'resources', 'settings_base.xml'),os.path.join(addonPath, 'resources', 'settings.xml'))
        s_file= os.path.join(addonPath, 'resources', 'settings.xml')
        file = open(s_file, 'r')
        file_data_settings= file.read()
        file.close()
        # --- Pass 1: mag_dir providers (progress label says "Normal Sources").
        onlyfiles=[f for f in listdir(mag_dir) if isfile(join(mag_dir, f))]
        add_data_mag=''
        found=0
        z=0
        for files in onlyfiles:
            # Skip helper/bytecode files that are not provider modules.
            if files !='general.py' and '.pyc' not in files and '.pyo' not in files and '__init__' not in files and files !='resolveurl_temp.py' and files!='cloudflare.py' and files!='Addon.py' and files!='cache.py':
                dp.update(int((z*100.0)/(len(onlyfiles))), 'Normal Sources','Searching Sources', files )
                z+=1
                impmodule = __import__(files.replace('.py',''))
                files=files.replace('.py','')
                type,sources_s=get_type(impmodule,files)
                f_txt=files
                if 'tv' in type:
                    f_txt=f_txt+' [COLOR lightseagreen] (TV) [/COLOR] '
                if 'movie' in type:
                    f_txt=f_txt+' [COLOR gold] (Movies) [/COLOR] '
                count+=1
                add_data_mag=add_data_mag+'\n'+'        <setting id="%s" label="%s" type="bool" default="true" />'%(files,f_txt)
                found=1
        # --- Pass 2: rd_dir (Real-Debrid) providers.
        onlyfiles = [f for f in listdir(rd_dir) if isfile(join(rd_dir, f))]
        add_data_rd=''
        found=0
        z=0
        for files in onlyfiles:
            if files !='general.py' and '.pyc' not in files and '.pyo' not in files and '__init__' not in files and files !='resolveurl_temp.py' and files!='cloudflare.py' and files!='Addon.py' and files!='cache.py':
                dp.update(int((z*100.0)/(len(onlyfiles))), 'RD Sources','Searching Sources', files )
                z+=1
                impmodule = __import__(files.replace('.py',''))
                files=files.replace('.py','')
                type,sources_s=get_type(impmodule,files)
                f_txt=files
                if 'tv' in type:
                    f_txt=f_txt+' [COLOR lightseagreen] (TV) [/COLOR] '
                if 'movie' in type:
                    f_txt=f_txt+' [COLOR gold] (Movies) [/COLOR] '
                count+=1
                add_data_rd=add_data_rd+'\n'+'        <setting id="%s" label="%s" type="bool" default="true" />'%(files,f_txt)
                found=1
        # --- Pass 3: done_dir providers (progress label says "Torrent Sources").
        onlyfiles = [f for f in listdir(done_dir) if isfile(join(done_dir, f))]
        add_data=''
        found=0
        z=0
        for files in onlyfiles:
            if files !='general.py' and '.pyc' not in files and '.pyo' not in files and '__init__' not in files and files !='resolveurl_temp.py' and files!='cloudflare.py' and files!='Addon.py' and files!='cache.py':
                dp.update(int((z*100.0)/(len(onlyfiles))), 'Torrent Sources','Searching Sources', files )
                z+=1
                count+=1
                impmodule = __import__(files.replace('.py',''))
                files=files.replace('.py','')
                type,sources_s=get_type(impmodule,files)
                f_txt=files
                if 'tv' in type:
                    f_txt=f_txt+' [COLOR lightseagreen] (TV) [/COLOR] '
                if 'movie' in type:
                    f_txt=f_txt+' [COLOR gold] (Movies) [/COLOR] '
                add_data=add_data+'\n'+'        <setting id="%s" label="%s" type="bool" default="true" />'%(files,f_txt)
                found=1
        # Splice the generated <setting> lines into the marker-delimited
        # sections of settings.xml.
        # NOTE(review): add_data (from done_dir, labelled "Torrent Sources")
        # lands in the "normal servers" section while add_data_mag (from
        # mag_dir, labelled "Normal Sources") lands in the "torrent servers"
        # section - confirm this cross-wiring is intentional.
        if 1:
            regex_normal='<!-- Start normal servers -->(.+?)<!-- End normal servers -->'
            m_normal=re.compile(regex_normal,re.DOTALL).findall(file_data_settings)
            regex_normal='<!-- Start torrent servers -->(.+?)<!-- End torrent servers -->'
            m_tr=re.compile(regex_normal,re.DOTALL).findall(file_data_settings)
            regex_normal='<!-- Start RD servers -->(.+?)<!-- End RD servers -->'
            m_rd=re.compile(regex_normal,re.DOTALL).findall(file_data_settings)
            add_data=add_data+'\n'
            add_data_rd=add_data_rd+'\n'
            add_data_mag=add_data_mag+'\n'
            file = open(s_file, 'w')
            if len(m_normal)>0:
                file_data_settings=file_data_settings.replace('<!-- Start normal servers -->%s<!-- End normal servers -->'%m_normal[0],'<!-- Start normal servers -->%s<!-- End normal servers -->'%add_data)
            if len(m_rd)>0:
                file_data_settings=file_data_settings.replace('<!-- Start RD servers -->%s<!-- End RD servers -->'%m_rd[0],'<!-- Start RD servers -->%s<!-- End RD servers -->'%add_data_rd)
            if len(m_tr)>0:
                file_data_settings=file_data_settings.replace('<!-- Start torrent servers -->%s<!-- End torrent servers -->'%m_tr[0],'<!-- Start torrent servers --> %s <!-- End torrent servers -->'%add_data_mag)
            file.write(file_data_settings)
            file.close()
        dp.close()
        xbmc.executebuiltin(u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Updated %s providers'%str(count)))
def PrintException():
    """Return a one-line description of the exception currently being
    handled, including file, line number and the offending source line.
    Must be called from inside an except block."""
    import linecache
    exc_type, exc_value, trace = sys.exc_info()
    frame = trace.tb_frame
    line_no = trace.tb_lineno
    src_file = frame.f_code.co_filename
    # Refresh linecache in case the file changed since it was first read.
    linecache.checkcache(src_file)
    src_line = linecache.getline(src_file, line_no, frame.f_globals)
    return 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(
        src_file, line_no, src_line.strip(), exc_value)
def ClearCache():
    """Purge every cache layer used by the add-on and notify the user.

    Clears the koding page/cookie/poster caches, the 'parsers' storage, any
    *.cache files under special://temp/.storage, any *.jar cookie jars under
    special://temp, and every table registered in Table_names.
    """
    from storage import Storage
    cache.clear(['cookies', 'pages','posters'])
    Storage.open("parsers").clear()
    storage_path = os.path.join(xbmc.translatePath("special://temp"), ".storage")
    if os.path.isdir(storage_path):
        for f in os.listdir(storage_path):
            # Escaped dot: the original pattern '.cache' let the regex dot
            # match any character, removing e.g. 'xcache' files too.
            if re.search(r'\.cache', f):
                os.remove(os.path.join(storage_path, f))
    cookies_path = xbmc.translatePath("special://temp")
    if os.path.isdir(cookies_path):
        for f in os.listdir(cookies_path):
            if re.search(r'\.jar', f):
                os.remove(os.path.join(cookies_path, f))
    # Drop every table the add-on registered in its bookkeeping table.
    res = koding.Get_All_From_Table("Table_names")
    for results in res:
        koding.Remove_Table(results['name'])
    xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Cleaned'.decode('utf8'))).encode('utf-8'))
class sources_window(pyxbmct.AddonDialogWindow):
    # Modal pyxbmct dialog that lists playable sources with a per-row quality
    # icon. The chosen row index is published through the module-level global
    # `list_index` (888 means the Close button was pressed).
    def __init__(self, title='',list=[],time_c=10,img=' ',txt=''):
        # NOTE(review): `list=[]` is a mutable default argument (shared across
        # calls) and shadows the builtin; callers always pass it, but worth
        # cleaning up.
        super(sources_window, self).__init__('Select source')
        self.list_o=list            # source strings, 'label$$$$$$$link' pairs
        self.title='Select source'  # `title` parameter is ignored
        wd=int(1250)
        hd=int(700)
        px=int(10)
        py=int(10)
        # 10x4 grid, fixed window size/position.
        self.setGeometry(wd, hd, 10, 4,pos_x=px, pos_y=py)
        self.time_c=time_c
        self.img=img
        self.txt=txt
        self.set_info_controls()
        self.set_active_controls()
        self.set_navigation()
        # Connect a key action (Backspace) to close the window.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
        #Thread(target=self.background_task).start()
    def set_info_controls(self):
        # Static controls: poster image (top-left) and auto-scrolling plot text.
        # Label
        #self.label = pyxbmct.Label('Sources:'+str(len(self.list_o)))
        #self.placeControl(self.label, 9, 2, 3, 1)
        self.image = pyxbmct.Image( self.img)
        self.placeControl(self.image, 0, 0, 2, 1)
        self.textbox = pyxbmct.TextBox()
        self.placeControl(self.textbox, 0,1, 2, 3)
        self.textbox.setText(self.txt)
        # Set auto-scrolling for long TexBox contents
        self.textbox.autoScroll(1000, 1000, 1000)
    def click_list(self):
        # A row was activated: publish its index and close.
        global list_index
        list_index=self.list.getSelectedPosition()
        self.close()
    def click_c(self):
        # Close button: publish the 888 cancel sentinel and close.
        global list_index
        list_index=888
        current_list_item=''
        self.close()
    def set_active_controls(self):
        # Interactive controls: the source list and the Close button.
        # List
        self.list = pyxbmct.List(font='font18', _imageWidth=75, _imageHeight=75, _itemTextXOffset=5, _itemTextYOffset=2, _itemHeight=55, _space=2, _alignmentY=4)
        self.placeControl(self.list, 2, 0,9, 4)
        # Add items to the list
        items = self.list_o
        n_items=[]
        a_links=[]
        # Quality badge icons keyed off the resolution substring in the label.
        icon_2160='http://www.dexonsystems.com/upload/public/images/DexonOthers/4k-uhd-logo-png.PNG'
        icon_1080='https://www.incehesap.com/resim/content/Webcam/Logitech_c920/Logitech_c920_9.png'
        icon_720='http://cctv.pcstown.com/ebay/bimg/New%20Wifi%20Cam/hd720p.png'
        icon_480='https://cdn3.iconfinder.com/data/icons/video-icons-2/299/480p-512.png'
        icon_360='https://cdn4.iconfinder.com/data/icons/proglyphs-multimedia/512/Standard_Definition-512.png'
        icon_un='https://cdn0.iconfinder.com/data/icons/basic-uses-symbol-vol-2/100/Help_Need_Suggestion_Question_Unknown-512.png'
        for it in items:
            # Each entry is 'display text$$$$$$$playable link'.
            text_i=it.split('$$$$$$$')[0]
            n_items.append(text_i)
            a_links.append(it.split('$$$$$$$')[1])
            if '2160' in text_i:
                icon_q=icon_2160
            elif '1080' in text_i:
                icon_q=icon_1080
            elif '720' in text_i:
                icon_q=icon_720
            elif '480' in text_i:
                icon_q=icon_480
            elif '360' in text_i:
                icon_q=icon_360
            else:
                icon_q=icon_un
            item = xbmcgui.ListItem(label=text_i, iconImage=icon_q, thumbnailImage=icon_q)
            self.list.addItem(item)
        #self.list.addItems(n_items)
        # Connect the list to a function to display which list item is selected.
        self.connect(self.list, self.click_list)
        # Connect key and mouse events for list navigation feedback.
        self.button = pyxbmct.Button('Close')
        self.placeControl(self.button, 9, 3)
        # Connect control to close the window.
        self.connect(self.button, self.click_c)
    def set_navigation(self):
        # Set navigation between controls
        self.list.controlRight(self.button)
        self.list.controlLeft(self.button)
        #self.list.controlDown(self.button)
        self.button.controlUp(self.list)
        self.button.controlLeft(self.list)
        self.button.controlRight(self.list)
        # Set initial focus
        self.setFocus(self.list)
    def slider_update(self):
        # NOTE(review): references self.slider/self.slider_value, which are
        # never created in this class - looks like leftovers from the pyxbmct
        # example; never called from within this class.
        # Update slider value label when the slider nib moves
        try:
            if self.getFocus() == self.slider:
                self.slider_value.setLabel('{:.1F}'.format(self.slider.getPercent()))
        except (RuntimeError, SystemError):
            pass
    def radio_update(self):
        # NOTE(review): self.radiobutton is never created here - leftover
        # from the pyxbmct example.
        # Update radiobutton caption on toggle
        if self.radiobutton.isSelected():
            self.radiobutton.setLabel('On')
        else:
            self.radiobutton.setLabel('Off')
    def list_update(self):
        # NOTE(review): self.list_item_label is never created here - leftover
        # from the pyxbmct example.
        # Update list_item label when navigating through the list.
        try:
            if self.getFocus() == self.list:
                self.list_item_label.setLabel(self.list.getListItem(self.list.getSelectedPosition()).getLabel())
            else:
                self.list_item_label.setLabel('')
        except (RuntimeError, SystemError):
            pass
    def setAnimation(self, control):
        # Set fade animation for all add-on window controls
        control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=1',),
                               ('WindowClose', 'effect=fade start=100 end=0 time=1',)])
class Thread(threading.Thread):
    """Convenience wrapper: Thread(func, *args) runs func(*args) in run()."""
    def __init__(self, target, *args):
        # Initialise the base class FIRST: on Python 3,
        # threading.Thread.__init__ also assigns self._target/self._args
        # (to None/()), so assigning them before calling it - as the
        # original did - gets silently clobbered and run() then calls None.
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
    def run(self):
        self._target(*self._args)
def get_custom_params(item):
    """Parse the query-string part of a plugin URL into a {key: value} dict.

    Mirrors the historical behaviour: returns the empty list when *item*
    carries no usable query string (no '?' or a query shorter than two
    characters), otherwise a dict built from the key=value pairs; pairs
    without exactly one '=' are skipped.
    """
    parsed = []
    pieces = item.split("?")
    if len(pieces) >= 2 and len(pieces[1]) >= 2:
        parsed = {}
        for pair in pieces[1].replace('?', '').split('&'):
            kv = pair.split('=')
            if len(kv) == 2:
                parsed[kv[0]] = kv[1]
    return parsed
def get_params():
param=[]
if len(sys.argv)>=2:
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def save_to_fav(plot):
    """Append *plot* to the favourites file (module-level save_file) if it
    is not already stored, compacting the file in the process.

    Lines shorter than 3 characters are dropped. The file is rewritten only
    when something actually changed, and a Kodi notification is shown.
    """
    file_data = []
    if os.path.exists(save_file):
        with open(save_file, 'r') as f:
            # Strip newlines up front so the duplicate check works for every
            # line: the file is written with '\n'.join, so its last line has
            # no trailing newline and the original 'plot+\n' test missed it,
            # re-appending the most recent favourite on every call.
            file_data = [line.replace('\n', '') for line in f.readlines()]
    change = 0
    if plot not in file_data:
        file_data.append(plot)
        change = 1
    # Iterate backwards over ALL entries (the original stopped at index 1,
    # leaving line 0 unexamined) so popping while iterating stays safe.
    for i in range(len(file_data) - 1, -1, -1):
        if len(file_data[i]) < 3:
            file_data.pop(i)
            change = 1
    if change > 0:
        with open(save_file, 'w') as f:
            f.write('\n'.join(file_data))
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Saved')).encode('utf-8'))
def get_tv_poster():
    """Return a shuffled list of backdrop URLs for TMDb's 'on the air' TV
    shows. Entries whose backdrop_path is null are skipped."""
    import random
    url = domain_s + 'api.themoviedb.org/3/tv/on_the_air?api_key=1248868d7003f60f2386595db98455ef&language=en-US'
    payload = requests.get(url).json()
    backdrops = []
    for entry in payload['results']:
        if 'backdrop_path' in entry and entry['backdrop_path'] is not None:
            backdrops.append(domain_s + 'image.tmdb.org/t/p/original/' + entry['backdrop_path'])
    random.shuffle(backdrops)
    return backdrops
def get_movie_poster():
    """Return a shuffled list of artwork URLs for TMDb's 'now playing'
    movies. Backdrops are preferred; the poster is used only when no
    'backdrop_path' key is present at all. A null value under the chosen
    key yields a ' ' placeholder entry (matching historical behaviour)."""
    import random
    url = domain_s + 'api.themoviedb.org/3/movie/now_playing?api_key=1248868d7003f60f2386595db98455ef&language=en-US'
    payload = requests.get(url).json()
    artwork = []
    for entry in payload['results']:
        if 'backdrop_path' in entry:
            key = 'backdrop_path'
        elif 'poster_path' in entry:
            key = 'poster_path'
        else:
            continue
        path = entry[key]
        if path is None:
            artwork.append(' ')
        else:
            artwork.append(domain_s + 'image.tmdb.org/t/p/original/' + path)
    random.shuffle(artwork)
    return artwork
def kids_world():
    """Build the Kids section: Cartoons and Anime directory entries."""
    rows = [
        ('Cartoons', 58, BASE_LOGO + 'cartoons.png',
         'http://digitalspyuk.cdnds.net/16/31/980x490/landscape-1470221630-cartoon-heroes.jpg'),
        ('Anime', 68, BASE_LOGO + 'anime.png',
         'https://images.wallpaperscraft.com/image/girls_und_panzer_nishizumi_miho_tank_hill_girl_100910_3840x2160.jpg'),
    ]
    for label, mode, icon, fanart in rows:
        addDir3(label.decode('utf8'), 'www', mode, icon, fanart, label.decode('utf8'))
def live():
    """Build the Live section: live TV, acestream, sports, m3u8 and NBA
    directory entries."""
    rows = [
        ('Live TV'.decode('utf8'), 'www', 146, BASE_LOGO + 'livetv.png',
         'https://cdn2.vectorstock.com/i/1000x1000/85/36/live-stream-tv-logo-icon-vector-16968536.jpg',
         'Live TV'.decode('utf8')),
        ('Acestream'.decode('utf8'), 'www', 76, BASE_LOGO + 'acestream.png',
         'https://i.pinimg.com/originals/6b/18/31/6b1831503dc0e0470b2bf1e1b5df978f.jpg',
         'Acestream'.decode('utf8')),
        ('Live Sports'.decode('utf8'), 'www', 40, BASE_LOGO + 'live.png',
         'https://scotch-res.cloudinary.com/image/upload/w_900,q_auto:good,f_auto/v1549206813/gyxlxwotow6xxysb527u.png',
         'Live Sports'.decode('utf8')),
        ('M3u8 Lists'.decode('utf8'), 'www', 55, BASE_LOGO + 'm3u8.png',
         'https://indianapublicmedia.org/wp-content/themes/ipm-aux-services/images/services/transmission.jpg',
         'M3u8 Lists'.decode('utf8')),
        ('NBA', 'https://www.nbafullhd.com', 105, BASE_LOGO + 'nba.png',
         'https://cdn.nba.net/nba-drupal-prod/2017-08/SEO-image-NBA-logoman.jpg',
         'NBA'.decode('utf8')),
    ]
    for label, url, mode, icon, fanart, desc in rows:
        addDir3(label, url, mode, icon, fanart, desc)
def mysettings():
    """Build the settings/tools directory: maintenance actions plus the
    'Check Sources' browser."""
    action_rows = [
        ('Settings', 24, 'setting.png',
         domain_s + 'www.wanderlustworker.com/wp-content/uploads/2014/05/setting-smarter-goals.jpg'),
        ('Authorize Real Debrid', 138, 'rd.png',
         'https://troypoint.com/wp-content/uploads/2017/10/install-real-debrid-kodi.jpg'),
        ('Clear Cache', 16, 'clearcache.png',
         domain_s + 'digitalart.io/storage/artworks/1264/pacific-rim-wallpaper-striker.jpeg'),
        ('Update Sources', 36, 'update.png',
         'https://images.idgesg.net/images/article/2018/03/update_cycle_arrows_on_background_of_orange_arrows_by_ranjith_siji_cc0_via_pixabay-100751945-large.jpg'),
        ('Recover from Backup', 89, 'recover.png',
         'https://hiverhq.com/blog/wp-content/uploads/2014/11/best-backup-tools-for-Google-Apps-and-Gmail-1.jpg'),
        ('Change Log', 100, 'changelog.png',
         'https://one2onenetwork.com/wp-content/uploads/2011/06/things-to-change-t2c-list.jpg'),
    ]
    for label, mode, icon, fanart in action_rows:
        addNolink(label, 'www', mode, False, iconimage=BASE_LOGO + icon, fanart=fanart)
    addDir3('Check Sources'.decode('utf8'), 'www', 98, BASE_LOGO + 'server.png',
            'https://wallpaperstock.net/wallpapers/thumbs1/8901wide.jpg',
            'Check Sources'.decode('utf8'))
class SelectorDialog(xbmcgui.WindowXMLDialog):
    # Skinned single-level selection dialog: shows kwargs['list'] in control
    # 450 and exposes the clicked entry via get_selection().
    def __init__(self, *args, **kwargs):
        xbmcgui.WindowXMLDialog.__init__(self)
        self.title = kwargs['title']
        self.list_items=kwargs['list']   # strings shown as rows
        self.f_list=[]                   # flat copy built by _inside_root()
        self.steps = kwargs['steps']
        self.items = []
        self.selection = None            # final choice, read via get_selection()
        self.insideIndex = -1            # -1 = root level
        self.completed_steps = 0
        xbmc.executebuiltin('Action(FullScreen)')
    def get_selection(self):
        """ get final selection """
        return self.selection
    def onInit(self):
        # Control 450 is the list in the dialog's XML; lock left/right
        # navigation to the list itself and pre-select row 1.
        self.list = self.getControl(450)
        self.list.controlLeft(self.list)
        self.list.controlRight(self.list)
        self.setFocus(self.list)
        self._inside_root(select=1)
    def onAction(self, action):
        # Back/close-type action ids: close at root, otherwise return to root.
        if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448,):
            if self.insideIndex == -1:
                self.close()
            else:
                self._inside_root(select=self.insideIndex)
    def onClick(self, controlID):
        # Record the clicked row as the final selection and close.
        num = self.list.getSelectedPosition()
        if num >= 0:
            if self.insideIndex == -1:
                self._inside(num)
                self.selection=self.f_list[self.insideIndex]
                self.close()
    def onFocus(self, controlID):
        # Redirect focus from auxiliary controls back to the list.
        if controlID in (3, 61):
            self.setFocus(self.list)
    def _inside_root(self, select=-1):
        # (Re)populate the list from self.list_items and reset to root level.
        #logging.warning(self.items)
        all_links=[]
        for items in self.list_items:
            listitem = xbmcgui.ListItem(items)
            self.list.addItem(listitem)
            self.f_list.append(items)
        if select >= 0:
            self.list.selectItem(select)
        self.insideIndex = -1
    def _inside(self, num):
        # Remember the entered row index; -1 bounces back to the root list.
        if num == -1:
            self._inside_root(select=self.insideIndex)
            return
        if 1:#with self.lock:
            self.insideIndex = num
def main_menu():
    """Build the add-on's root directory listing.

    Each top-level entry is gated by an m_* setting toggle; also computes a
    watch-count 'level' used for the My Rating row.
    """
    if len(sys.argv)<2:
        return 0
    dbcur.execute("SELECT COUNT(*) FROM AllData")
    # NOTE(review): fix_setting() runs between execute() and fetchone() on
    # the same shared cursor; confirm it cannot issue queries of its own
    # (via ClearCache) that would invalidate this pending result.
    fix_setting()
    match = dbcur.fetchone()
    # One level per 100 watched items, capped at 9 (level_movies/level_fanart
    # are indexed with this value).
    # NOTE(review): '/' is integer division on Python 2 only; under Python 3
    # this yields a float and would break the list indexing below.
    level_index=(match[0]/100)
    if level_index>9:
        level_index=9
    if Addon.getSetting("m_jen")=='true':
        addDir3('Deathstar'.decode('utf8'),'www',42,BASE_LOGO+'jen.png','https://geek-prime.com/wp-content/uploads/2014/02/Destiny-2-4k-hd-wallpaper-invasion-ghaul.jpg','The Deathstar Addon'.decode('utf8'))
    if Addon.getSetting("m_movies")=='true':
        addDir3('Movies'.decode('utf8'),'www',13,BASE_LOGO+'movies.png','http://hdqwalls.com/wallpapers/avengers-infinity-war-2018-poster-fan-made-62.jpg','Movies'.decode('utf8'))
    if Addon.getSetting("m_tvshows")=='true':
        addDir3('TV Shows'.decode('utf8'),'www',14,BASE_LOGO+'tvshow.png','https://i.imgur.com/a174Ipu.jpg','TV Shows'.decode('utf8'))
    if Addon.getSetting("m_kids")=='true':
        addDir3('Kids'.decode('utf8'),'www',44,BASE_LOGO+'kids.png','https://www.desktopbackground.org/download/o/2013/09/27/645377_kids-kung-fu-panda-3-movie-4k-wallpapers_3840x2160_h.jpg','Kids'.decode('utf8'))
    if Addon.getSetting("m_live")=='true':
        addDir3('Live'.decode('utf8'),'www',37,BASE_LOGO+'livetv.png','https://scotch-res.cloudinary.com/image/upload/w_900,q_auto:good,f_auto/v1549206813/gyxlxwotow6xxysb527u.png','Live'.decode('utf8'))
    logging.warning( 'USE TRAKT')
    logging.warning( Addon.getSetting("use_trak"))
    if Addon.getSetting("m_trakt")=='true':
        addDir3('Trakt'.decode('utf8'),'www',29,BASE_LOGO+'trakt.png',domain_s+'www.mjdtech.net/content/images/2016/02/traktfeat.jpg','Trakt')
    if Addon.getSetting("m_lastp")=='true':
        addDir3('Last Played'.decode('utf8'),'www',49,BASE_LOGO+'last.png','https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQgZlTxhsnI3lZ9gzBokPvapZG1W3S-_G1UNCohkK5il9r5myUF','Last Played'.decode('utf8'))
    if Addon.getSetting("m_tools")=='true':
        addDir3('Tools'.decode('utf8'),'www',38,BASE_LOGO+'tools.png','http://hdqwalls.com/wallpapers/avengers-infinity-war-2018-poster-fan-made-62.jpg','Tools'.decode('utf8'))
    # Gamified watch-count summary shown on the My Rating row.
    plot='[COLOR gold]'+'You are in Level '+str(level_index+1)+'\n'+'So Far You Have Watched '+str(match[0]) +' Movies and Episode '+' Keep Going.... '+'[/COLOR]'+'\nAnother ' +str((100*(level_index+1))-int(match[0]))+' To Move to the Next Level :-)'
    if Addon.getSetting("m_rating")=='true':
        addLink(''+'My Rating'+'',level_movies[level_index],35,False,iconimage=BASE_LOGO+'rating.png',fanart=level_fanart[level_index],description=plot)
    if Addon.getSetting("m_search")=='true':
        addDir3('Search'.decode('utf8'),'www',15,BASE_LOGO+'search.png','https://geek-prime.com/wp-content/uploads/2014/02/Destiny-2-4k-hd-wallpaper-invasion-ghaul.jpg','Search'.decode('utf8'))
    # if Addon.getSetting("m_setting")=='true':
    # #	addNolink('Settings','www',24,False,iconimage=BASE_LOGO+'setting.png',fanart=domain_s+'www.wanderlustworker.com/wp-content/uploads/2014/05/setting-smarter-goals.jpg')
    # if Addon.getSetting("m_enterr")=='true':
    #     addNolink('Enable Real Debrid','www',138,False,iconimage=BASE_LOGO+'rd.png',fanart='https://troypoint.com/wp-content/uploads/2017/10/install-real-debrid-kodi.jpg')
    # if Addon.getSetting("m_clear")=='true':
    #     addNolink('Clear Cache','www',16,False,iconimage=BASE_LOGO+'clearcache.png',fanart=domain_s+'digitalart.io/storage/artworks/1264/pacific-rim-wallpaper-striker.jpeg')
    # if Addon.getSetting("m_update")=='true':
    #     addNolink('Update Sources','www',36,False,iconimage=BASE_LOGO+'update.png',fanart='https://images.idgesg.net/images/article/2018/03/update_cycle_arrows_on_background_of_orange_arrows_by_ranjith_siji_cc0_via_pixabay-100751945-large.jpg')
    # if Addon.getSetting("m_recover")=='true':
    #     addNolink('Recover from Backup','www',89,False,iconimage=BASE_LOGO+'recover.png',fanart='https://hiverhq.com/blog/wp-content/uploads/2014/11/best-backup-tools-for-Google-Apps-and-Gmail-1.jpg')
    # if Addon.getSetting("m_chnage")=='true':
    #     addNolink('Change Log','www',100,False,iconimage=BASE_LOGO+'changelog.png',fanart='https://one2onenetwork.com/wp-content/uploads/2011/06/things-to-change-t2c-list.jpg')
    # if Addon.getSetting("m_checks")=='true':
    #     addDir3('Check Sources'.decode('utf8'),'www',98,BASE_LOGO+'server.png','https://wallpaperstock.net/wallpapers/thumbs1/8901wide.jpg','Check sources'.decode('utf8'))
    # if Addon.getSetting("m_fav")=='true':
    #     addDir3('My Favorites'.decode('utf8'),'all',18,BASE_LOGO+'fav.png','http://jonvilma.com/images/reign-tv-series-2.jpg','My Favorites'.decode('utf8'))
def movies_menu():
    # Build the top-level "Movies" directory: one-click debrid/free sources,
    # TMDB discovery lists, a resume-last-link entry, actors and search.
    import datetime
    #all_img=get_movie_poster()
    now = datetime.datetime.now()
    # YouTube query URL for translated trailers of the current year
    # (Hebrew search terms, URL-encoded). Not used below — kept for reference.
    link_url='https://www.youtube.com/results?search_query=%D7%98%D7%A8%D7%99%D7%99%D7%9C%D7%A8+%D7%9E%D7%AA%D7%95%D7%A8%D7%92%D7%9D+{0}&page=1'.format( str(now.year))
    # Fan-art images for the menu entries, cached for 24h in the 'posters' table.
    all_img=cache.get(get_movie_poster,24, table='posters')
    if allow_debrid:
        # Real-Debrid one-click entry is shown only when a debrid account is active.
        addDir3('One Click RD Movies','www',149,BASE_LOGO+'oneclickrd.png',all_img[19],'One Click RD Movies')
    addDir3('One Click Free Movies','https://moviesmax.net',150,BASE_LOGO+'oneclickfree.png',all_img[1],'One Click Free Movies')
    # TMDB discovery lists (mode 3 renders a TMDB listing URL).
    addDir3('Hot Movies','http://api.themoviedb.org/3/trending/movie/week?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'hotmovies.png',all_img[13],'Hot Movies')
    addDir3('Lastest HD'.decode('utf8'),domain_s+'www.dvdsreleasedates.com/movies/',28,BASE_LOGO+'latest.png',all_img[5],'Lastest HD'.decode('utf8'),isr=' ')
    addDir3('Popular','http://api.themoviedb.org/3/movie/popular?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'popular.png',all_img[4],'Popular Movies')
    addDir3('In Theaters'.decode('utf8'),'http://api.themoviedb.org/3/movie/now_playing?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'intheatres.png',all_img[10],'In Theaters'.decode('utf8'))
    addDir3('Genres','http://api.themoviedb.org/3/genre/movie/list?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',2,BASE_LOGO+'genres.png',all_img[0],'Genres'.decode('utf8'))
    addDir3('Studio'.decode('utf8'),'movie',112,BASE_LOGO+'studio.png','https://cdn-static.denofgeek.com/sites/denofgeek/files/styles/main_wide/public/2016/04/movlic_studios_1.jpg?itok=ih8Z7wOk','Studios')
    addDir3('Recommended for YOU','www',26,BASE_LOGO+'recomm.png',all_img[14],'Recommended for YOU',isr=' ')
    addDir3('Last Watched'.decode('utf8'),'movie',91,BASE_LOGO+'lwatched.png',all_img[7],'Last watched',isr=' ')
    # Resume entry: the single "latest movie link" row is keyed by the literal
    # o_name value 'f_name' (table convention used elsewhere in this addon).
    dbcur.execute("SELECT * FROM lastlinkmovie WHERE o_name='f_name'")
    match = dbcur.fetchone()
    if match!=None:
        # Row layout is positional — 17 columns, unpacked in table order.
        f_name,name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id=match
        try:
            if url!=' ':
                if 'http' not in url:
                    # Saved links may be stored base64-obfuscated (Python 2 codec).
                    url=url.decode('base64')
                addLink('Last Played Link', 'latest_movie',5,False,iconimage,fanart,description,data=show_original_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=prev_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year)
        except Exception as e:
            # Best effort: a corrupted saved link must not break the whole menu.
            logging.warning(e)
            pass
    addDir3('Actors'.decode('utf8'),'www',72,BASE_LOGO+'actors.png','https://hdqwalls.com/download/avengers-infinity-war-imax-poster-na-2048x1152.jpg','Actors'.decode('utf8'))
    addDir3('Search','http://api.themoviedb.org/3/search/movie?api_key=1248868d7003f60f2386595db98455ef&query=%s&language=en&append_to_response=origin_country&page=1',3,BASE_LOGO+'searchm.png','http://www.videomotion.co.il/wp-content/uploads/whatwedo-Pic-small.jpg','search')
    # addDir3('Popular in 20 years'.decode('utf8'),'movie',133,BASE_LOGO+'popular20.png',all_img[11],'Popular in 20 years',data='20',original_title='1')
    # addDir3('Popular in 10 years'.decode('utf8'),'movie',133,BASE_LOGO+'popular10.png',all_img[12],'Popular in 10 years',data='10',original_title='1')
    # addDir3('Based on True Story','https://www.imdb.com/search/title?genres=biography&sort=num_votes,desc&explore=title_type,genres',114,BASE_LOGO+'based.png',all_img[14],'Based on true story',isr=' ')
    # addDir3('Oscar Winners Best Movie','https://www.imdb.com/search/title?count=100&groups=oscar_best_picture_winners&sort=year,desc&ref_=nv_ch_osc',114,BASE_LOGO+'oscarmovie.png',all_img[16],'Oscar Winners best movie',isr=' ')
    # addDir3('Oscar Winners Best Actor','https://www.imdb.com/list/ls068045646/?sort=list_order,asc&mode=detail&page=1',134,BASE_LOGO+'oscaractor.png',all_img[17],'Oscar Winners best actor',isr=' ')
    # # addDir3('IMDB Popular','https://www.imdb.com/chart/moviemeter?ref_=nv_mv_mpm',114,BASE_LOGO+'imdbp.png',all_img[15],'IMDB Popular',isr=' ')
    # addDir3('Favourites'.decode('utf8'),'movies',18,BASE_LOGO+'favorites.png','http://4.bp.blogspot.com/-8q4ops3bX_0/T0TWUOu5ETI/AAAAAAAAA1A/AQMDv0Sv4Cs/s1600/logo1.gif','Favorites'.decode('utf8'))
def tv_neworks():
    """Build the TV "Networks" directory: one TMDB discover list per broadcaster.

    The sort order comes from the "order_networks" addon setting:
    '0' -> popularity, '1' -> first air date, '2' -> vote average.
    """
    # Default first: the original only assigned order_by inside the if/elif
    # chain, so a missing/unknown setting value raised NameError at .format().
    order_by='popularity.desc'
    if Addon.getSetting("order_networks")=='2':
        order_by='vote_average.desc'
    elif Addon.getSetting("order_networks")=='1':
        order_by='first_air_date.desc'
    addDir3('ABC'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=2&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'http://logok.org/wp-content/uploads/2014/03/abc-gold-logo-880x660.png','https://i.ytimg.com/vi/xSOp4HJTxH4/maxresdefault.jpg','ABC'.decode('utf8'))
    addDir3('AMAZON'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=1024&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'http://g-ec2.images-amazon.com/images/G/01/social/api-share/amazon_logo_500500._V323939215_.png','https://cdn.images.express.co.uk/img/dynamic/59/590x/Amazon-Fire-TV-Amazon-Fire-TV-users-Amazon-Fire-TV-stream-Amazon-Fire-TV-Free-Dive-TV-channel-Amazon-Fire-TV-news-Amazon-1010042.jpg?r=1535541629130','AMAZON'.decode('utf8'))
    addDir3('CBS'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=16&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://cdn.freebiesupply.com/logos/large/2x/cbs-logo-png-transparent.png','https://tvseriesfinale.com/wp-content/uploads/2014/10/cbs40-590x221.jpg','HBO'.decode('utf8'))
    addDir3('CW'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=71&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://www.broadcastingcable.com/.image/t_share/MTU0Njg3Mjc5MDY1OTk5MzQy/tv-network-logo-cw-resized-bc.jpg','https://i2.wp.com/nerdbastards.com/wp-content/uploads/2016/02/The-CW-Banner.jpg','The CW'.decode('utf8'))
    addDir3('HBO'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=49&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://filmschoolrejects.com/wp-content/uploads/2018/01/hbo-logo.jpg','https://www.hbo.com/content/dam/hbodata/brand/hbo-static-1920.jpg','HBO'.decode('utf8'))
    addDir3('Hulu'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=453&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://i1.wp.com/thetalkinggeek.com/wp-content/uploads/2012/03/hulu_logo_spiced-up.png?resize=300%2C225&ssl=1','https://www.google.com/url?sa=i&rct=j&q=&esrc=s&source=images&cd=&cad=rja&uact=8&ved=2ahUKEwi677r77IbeAhURNhoKHeXyB-AQjRx6BAgBEAU&url=https%3A%2F%2Fwww.hulu.com%2F&psig=AOvVaw0xW2rhsh4UPsbe8wPjrul1&ust=1539638077261645','hulu'.decode('utf8'))
    addDir3('NBC'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=6&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://designobserver.com/media/images/mondrian/39684-NBC_logo_m.jpg','https://www.nbcstore.com/media/catalog/product/cache/1/image/1000x/040ec09b1e35df139433887a97daa66f/n/b/nbc_logo_black_totebagrollover.jpg','NBC'.decode('utf8'))
    addDir3('NetFlix'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=213&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://art.pixilart.com/705ba833f935409.png','https://i.ytimg.com/vi/fJ8WffxB2Pg/maxresdefault.jpg','NetFlix'.decode('utf8'))
    addDir3('SyFy'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&with_networks=77&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'http://cdn.collider.com/wp-content/uploads/syfy-logo1.jpg','https://imagesvc.timeincapp.com/v3/mm/image?url=https%3A%2F%2Fewedit.files.wordpress.com%2F2017%2F05%2Fdefault.jpg&w=1100&c=sc&poi=face&q=85','SyFy'.decode('utf8'))
def movie_prodiction():
    """Build the movie "Studio" directory: one TMDB discover list per production company.

    The sort order comes from the "order_networks" addon setting:
    '0' -> popularity, '1' -> first air date, '2' -> vote average.
    """
    # Default first: the original only assigned order_by inside the if/elif
    # chain, so a missing/unknown setting value raised NameError at .format().
    order_by='popularity.desc'
    if Addon.getSetting("order_networks")=='2':
        order_by='vote_average.desc'
    elif Addon.getSetting("order_networks")=='1':
        order_by='first_air_date.desc'
    addDir3('20th Century Fox'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=25&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://pmcdeadline2.files.wordpress.com/2017/03/20th-century-fox-cinemacon1.jpg?w=446&h=299&crop=1','https://vignette.wikia.nocookie.net/simpsons/images/8/80/TCFTV_logo_%282013-%3F%29.jpg/revision/latest?cb=20140730182820','20th Century Fox'.decode('utf8'))
    addDir3('Columbia Pictures'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=5&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://static.tvtropes.org/pmwiki/pub/images/lady_columbia.jpg','https://vignette.wikia.nocookie.net/marveldatabase/images/1/1c/Columbia_Pictures_%28logo%29.jpg/revision/latest/scale-to-width-down/1000?cb=20141130063022','Columbia Pictures'.decode('utf8'))
    addDir3('DC Studios'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=9993&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://pmcvariety.files.wordpress.com/2013/09/dc-comics-logo.jpg?w=1000&h=563&crop=1','http://www.goldenspiralmedia.com/wp-content/uploads/2016/03/DC_Comics.jpg','DC Studios'.decode('utf8'))
    addDir3('DreamWorks'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=7&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://www.dreamworksanimation.com/share.jpg','https://www.verdict.co.uk/wp-content/uploads/2017/11/DA-hero-final-final.jpg','DreamWorks'.decode('utf8'))
    addDir3('Gracie Films'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=18&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://i.ytimg.com/vi/q_slAJmZBeQ/hqdefault.jpg','https://i.ytimg.com/vi/yGofbuJTb4g/maxresdefault.jpg','Gracie Films'.decode('utf8'))
    addDir3('Imagine Entertainment'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=23&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://s3.amazonaws.com/fs.goanimate.com/files/thumbnails/movie/2813/1661813/9297975L.jpg','https://www.24spoilers.com/wp-content/uploads/2004/06/Imagine-Entertainment-logo.jpg','Imagine Entertainment'.decode('utf8'))
    addDir3('Lions Gate Films'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=35&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'http://image.wikifoundry.com/image/1/QXHyOWmjvPRXhjC98B9Lpw53003/GW217H162','https://vignette.wikia.nocookie.net/fanon/images/f/fe/Lionsgate.jpg/revision/latest?cb=20141102103150','Lions Gate Films'.decode('utf8'))
    addDir3('Lucasfilm'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=1&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://fontmeme.com/images/lucasfilm-logo.png','https://i.ytimg.com/vi/wdYaG3o3bgE/maxresdefault.jpg','Lucasfilm'.decode('utf8'))
    addDir3('Marvel'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=7505&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://yt3.ggpht.com/a-/AN66SAwQlZAow0EBMi2-tFht-HvmozkqAXlkejVc4A=s900-mo-c-c0xffffffff-rj-k-no','https://images-na.ssl-images-amazon.com/images/I/91YWN2-mI6L._SL1500_.jpg','Marvel'.decode('utf8'))
    addDir3('MGM'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=21&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://pbs.twimg.com/profile_images/958755066789294080/L9BklGz__400x400.jpg','https://assets.entrepreneur.com/content/3x2/2000/20150818171949-metro-goldwun-mayer-trade-mark.jpeg','MGM'.decode('utf8'))
    addDir3('Miramax'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=14&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://vignette.wikia.nocookie.net/disney/images/8/8b/1000px-Miramax_1987_Print_Logo.png/revision/latest?cb=20140902041428','https://i.ytimg.com/vi/4keXxB94PJ0/maxresdefault.jpg','Miramax'.decode('utf8'))
    addDir3('New Line Cinema'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=12&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://upload.wikimedia.org/wikipedia/en/thumb/0/04/New_Line_Cinema.svg/1200px-New_Line_Cinema.svg.png','https://vignette.wikia.nocookie.net/theideas/images/a/aa/New_Line_Cinema_logo.png/revision/latest?cb=20180210122847','New Line Cinema'.decode('utf8'))
    addDir3('Orion Pictures'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=41&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://i.ytimg.com/vi/43OehM_rz8o/hqdefault.jpg','https://i.ytimg.com/vi/g58B0aSIB2Y/maxresdefault.jpg','Lions Gate Films'.decode('utf8'))
    addDir3('Paramount'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=4&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://upload.wikimedia.org/wikipedia/en/thumb/4/4d/Paramount_Pictures_2010.svg/1200px-Paramount_Pictures_2010.svg.png','https://vignette.wikia.nocookie.net/logopedia/images/a/a1/Paramount_Pictures_logo_with_new_Viacom_byline.jpg/revision/latest?cb=20120311200405&format=original','Paramount'.decode('utf8'))
    addDir3('Pixar'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=3&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://elestoque.org/wp-content/uploads/2017/12/Pixar-lamp.png','https://wallpapercave.com/wp/GysuwJ2.jpg','Pixar'.decode('utf8'))
    addDir3('Sony Pictures'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=34&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://upload.wikimedia.org/wikipedia/commons/thumb/6/63/Sony_Pictures_Television_logo.svg/1200px-Sony_Pictures_Television_logo.svg.png','https://vignette.wikia.nocookie.net/logopedia/images/2/20/Sony_Pictures_Digital.png/revision/latest?cb=20140813002921','Sony Pictures'.decode('utf8'))
    addDir3('Walt Disney Pictures'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=2&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'https://i.ytimg.com/vi/9wDrIrdMh6o/hqdefault.jpg','https://vignette.wikia.nocookie.net/logopedia/images/7/78/Walt_Disney_Pictures_2008_logo.jpg/revision/latest?cb=20160720144950','Walt Disney Pictures'.decode('utf8'))
    addDir3('Warner Bros.'.decode('utf8'),domain_s+'api.themoviedb.org/3/discover/movie?api_key=1248868d7003f60f2386595db98455ef&with_companies=174&language=en&sort_by={0}&timezone=America%2FNew_York&include_null_first_air_dates=false&page=1'.format(order_by),3,'http://looking.la/wp-content/uploads/2017/10/warner-bros.png','https://cdn.arstechnica.net/wp-content/uploads/2016/09/warner.jpg','SyFy'.decode('utf8'))
def tv_menu():
    # Build the top-level "TV" directory: tracker, TMDB discovery lists,
    # a resume-last-link entry, actors and search.
    import datetime
    now = datetime.datetime.now()
    # Fan-art images for the menu entries, cached for 24h in the 'posters' table.
    all_img=cache.get(get_tv_poster,24, table='posters')
    addDir3('Series Tracker','tv',32,BASE_LOGO+'tracks.png',all_img[6],'Series tracker',isr=' ')
    # TMDB discovery lists (mode 3 renders a TMDB listing URL).
    addDir3('Hot TV','http://api.themoviedb.org/3/trending/tv/week?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'hottv.png',all_img[13],'Hot This Week')
    addDir3('Popular','http://api.themoviedb.org/3/tv/popular?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'populartv.png',all_img[1],'Popular')
    # NOTE(review): 'include_null_first_air_ates' looks like a typo of
    # include_null_first_air_dates — presumably TMDB ignores the unknown param.
    addDir3('New',domain_s+'api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&language=en-US&sort_by=popularity.desc&first_air_date_year='+str(now.year)+'&timezone=America%2FNew_York&include_null_first_air_ates=false&language=en&page=1',3,BASE_LOGO+'new.png',all_img[3],'New')
    addDir3('Running Series','https://api.themoviedb.org/3/tv/on_the_air?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',3,BASE_LOGO+'running.png',all_img[8],'Running Series')
    addDir3('Genres','http://api.themoviedb.org/3/genre/tv/list?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1',2,BASE_LOGO+'genrestv.png',all_img[0],'Genres')
    addDir3('Networks','tv',101,BASE_LOGO+'networks.png','https://images.pond5.com/tv-networks-logos-loop-footage-042898083_prevstill.jpeg','Networks Shows')
    addDir3('Years','tv_years&page=1',3,BASE_LOGO+'yearstv.png',all_img[2],'Years')
    addDir3('Recommended Shows for You','www',27,BASE_LOGO+'recotv.png',all_img[5],'Recommended Shows for You Based on your History',isr=' ')
    addDir3('Watched Shows','tv',91,BASE_LOGO+'watchedtv.png',all_img[7],'watched shows',isr=' ')
    # Resume entry: the single "latest TV link" row is keyed by the literal
    # o_name value 'f_name' (same convention as the movie table).
    dbcur.execute("SELECT * FROM lastlinktv WHERE o_name='f_name'")
    match = dbcur.fetchone()
    if match!=None:
        # Row layout is positional — 17 columns, unpacked in table order.
        f_name,name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id=match
        try:
            if url!=' ':
                if 'http' not in url:
                    # Saved links may be stored base64-obfuscated (Python 2 codec).
                    url=url.decode('base64')
                addLink('Last Played Link', 'latest_tv',5,False,iconimage,fanart,description,data=show_original_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=prev_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year)
        except Exception as e:
            # Best effort: a corrupted saved link must not break the whole menu.
            logging.warning(e)
            pass
    addDir3('Actors'.decode('utf8'),'www',72,BASE_LOGO+'actors.png','https://hdqwalls.com/download/avengers-infinity-war-imax-poster-na-2048x1152.jpg','Actors'.decode('utf8'))
    addDir3('Search','http://api.themoviedb.org/3/search/tv?api_key=1248868d7003f60f2386595db98455ef&query=%s&language=en&page=1',3,BASE_LOGO+'searchtv.png',domain_s+'f.frogi.co.il/news/640x300/010170efc8f.jpg','Search')
    # addDir3('Popular in 20 Years','tv',133,BASE_LOGO+'popular20tv.png',all_img[11],'Popular in 20 years',data='20',original_title='1')
    # addDir3('Popular in 10 Years','tv',133,BASE_LOGO+'popular10tv.png',all_img[12],'Popular in 10 years',data='10',original_title='1')
    # addDir3('Favourite Shows','tv',18,BASE_LOGO+'favoritestv.png','http://4.bp.blogspot.com/-8q4ops3bX_0/T0TWUOu5ETI/AAAAAAAAA1A/AQMDv0Sv4Cs/s1600/logo1.gif','Favorites shows')
    # addDir3('Popular IMDB','https://www.imdb.com/search/title?title_type=tv_series',114,BASE_LOGO+'popimdbtv.png',all_img[8],'Popular IMDB',isr=' ')
def search_menu():
    # "Search" directory: one TMDB search entry for movies and one for TV shows.
    movie_query='http://api.themoviedb.org/3/search/movie?api_key=1248868d7003f60f2386595db98455ef&query=%s&language=en&append_to_response=origin_country&page=1'
    tv_query='http://api.themoviedb.org/3/search/tv?api_key=1248868d7003f60f2386595db98455ef&query=%s&language=en&page=1'
    addDir3('Search Movie',movie_query,3,BASE_LOGO+'searchm.png','http://www.videomotion.co.il/wp-content/uploads/whatwedo-Pic-small.jpg','search')
    addDir3('Search TV Show',tv_query,3,BASE_LOGO+'searchtv.png',domain_s+'f.frogi.co.il/news/640x300/010170efc8f.jpg','Search')
def get_genere(link,icon):
    """Add one directory entry (mode 3) per genre returned by the TMDB genre API.

    link: TMDB genre-list URL; a '/movie' substring marks the movie variant,
          anything else is treated as TV.
    icon: icon image reused for every generated entry.
    """
    # Removed the unused `images={}` local from the original.
    html=requests.get(link).json()
    for data in html['genres']:
        # Movie genres list titles via /genre/<id>/movies; TV genres go
        # through the discover endpoint filtered by with_genres.
        if '/movie' in link:
            new_link='http://api.themoviedb.org/3/genre/%s/movies?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1'%str(data['id'])
        else:
            new_link='http://api.themoviedb.org/3/discover/tv?api_key=1248868d7003f60f2386595db98455ef&sort_by=popularity.desc&with_genres=%s&language=en&page=1'%str(data['id'])
        addDir3(data['name'],new_link,3,icon,DESIMG,data['name'])
def merge_two_dicts(x, y):
    """Return a new dict with x's entries overlaid by y's (y wins on clashes).

    Neither input is modified.
    """
    merged = dict(x)
    merged.update(y)
    return merged
def start_window2(id,tv_movie,name,selected_option):
    # Open the second-style source-search dialog; option '2' switches it
    # into "find similar" mode, any other option runs a plain search.
    send_type = 'find_similar' if selected_option == '2' else ''
    dialog = sources_search2('plugin.video.destinyds', id, tv_movie, send_type)
    dialog.doModal()
    del dialog
def start_window(id,tv_movie,name,selected_option):
    # Open the classic source-search dialog for this title and block until
    # it is closed (selected_option is accepted for signature parity only).
    dialog = sources_search('plugin.video.destinyds', id, tv_movie, name)
    dialog.doModal()
    del dialog
def get_subs_trd(imdb_id,season,episode):
    # Background-thread helper: warm the subtitle cache for one title.
    # Always returns 'ok'; the cached value itself is not used here.
    logging.warning('subtitle trd')
    logging.warning([(imdb_id, season, episode)])
    # '%20' is the plugin-URL placeholder for "no value" -> normalize to None.
    season = None if season == '%20' else season
    episode = None if episode == '%20' else episode
    # Fetch (or reuse) the subtitle server result, cached for 24h.
    cache.get(get_sub_server, 24, imdb_id, season, episode, table='pages')
    return 'ok'
def check_cached(magnet):
    """Return True when Real-Debrid reports this magnet's torrent as instantly available."""
    import real_debrid
    debrid = real_debrid.RealDebrid()
    # Pull the info-hash out of the magnet URI (lower-cased, as RD expects).
    info_hash = str(re.findall(r'btih:(.*?)&', magnet)[0].lower())
    availability = debrid.checkHash(info_hash)
    # Cached only when the hash is known AND it carries a non-empty 'rd' entry.
    if info_hash in availability and 'rd' in availability[info_hash]:
        return len(availability[info_hash]['rd']) > 0
    return False
def get_condition(name1,links,server,res,tv_movie,f_result,data,original_title):
    """Decide whether a source's resolution falls inside the user's "super fast"
    auto-play window.

    Only `res` (resolution string, e.g. '1080') and `tv_movie` ('tv'/'movie')
    drive the decision; the other parameters are kept for call-site
    compatibility.

    Returns (passed, reason): `passed` is a bool, `reason` is a comma-joined
    summary of the checks with the ' V ' pass marker stripped (so failures
    keep their ' X ' prefix).
    """
    # Defined before the try so the except handler can always reference it
    # (the original defined it inside the try, risking a second NameError).
    str_check=[]
    try:
        # Index into this table is the 0-4 value of the min/max settings.
        res_table=['2160','1080','720','480','360']
        try:
            int(res)  # validate: non-numeric resolutions fall back to '0'
        except:
            res='0'
        if tv_movie=='tv':
            min_super_fast=res_table[int(Addon.getSetting("tv_min_super_fast"))]
            max_super_fast=res_table[int(Addon.getSetting("tv_max_super_fast"))]
        else:
            min_super_fast=res_table[int(Addon.getSetting("movies_min_super_fast"))]
            max_super_fast=res_table[int(Addon.getSetting("movies_max_super_fast"))]
        # Same comparison as the original (unused check_r_l/condition_sources
        # locals removed; the two-step condition flag collapsed).
        condition_res=int(min_super_fast)<=int(res)<=int(max_super_fast)
        if condition_res:
            str_check.append(' V Res:%s '%res)
        else:
            str_check.append(' X Res:%s '%res)
        return condition_res,','.join(str_check).replace(' V ','')
    except Exception as e:
        # Report the failing line both as a Kodi notification and in the log.
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Line:'+str(lineno)+' E:'+str(e))).encode('utf-8'))
        logging.warning('ERROR IN Superplay:'+str(lineno))
        logging.warning('inline:'+line)
        logging.warning(e)
        logging.warning('BAD Superplay')
        return False,','.join(str_check).replace(' V ','')
def c_get_sources(name,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local=False,fav_status='false',only_torrent='no',only_heb_servers='0'):
global stop_all,once_fast_play,silent_mode,stoped_play_once
try:
global all_s_in,stop_window,global_result,all_links_sources
import random
original_title=clean_name(original_title,1)
try:
import resolveurl
hostDict = resolveurl.relevant_resolvers(order_matters=True)
hostDict = [i.domains for i in hostDict if '*' not in i.domains]
hostDict = [i.lower() for i in reduce(lambda x, y: x+y, hostDict)]
hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
except Exception:
hostDict = []
premiered=isr
tmdbKey = '1248868d7003f60f2386595db98455ef'
if season!=None and season!="%20":
tv_movie='tv'
url2='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
else:
tv_movie='movie'
url2='http://api.themoviedb.org/3/movie/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
if 'tt' not in id:
try:
imdb_id=requests.get(url2).json()['external_ids']['imdb_id']
except:
imdb_id=" "
all_s_in=({},0,'','','')
da=[]
stop_window=False
da.append((name,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local,fav_status))
logging.warning('da')
logging.warning(da)
if debug_mode==True:
logging.warning('Searching Sources')
if season!=None and season!="%20":
tv_movie='tv'
else:
tv_movie='movie'
if Addon.getSetting("new_server_dp")=='true' and silent_mode==False:
thread=[]
selected_option=Addon.getSetting("new_server_dp_option")
if selected_option=='3':
rand=(random.randint(0,299)/100)
selected_option=str(int(rand))
if selected_option=='0':
thread.append(Thread(start_window,id,tv_movie,heb_name,selected_option))
thread[len(thread)-1].setName('sources_s1')
elif selected_option=='1' or selected_option=='2':
thread.append(Thread(start_window2,id,tv_movie,heb_name,selected_option))
thread[len(thread)-1].setName('sources_s2')
thread[0].start()
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp = xbmcgui . DialogProgress ( )
dp.create('Please Wait','Searching Sources', '','')
dp.update(0, 'Please Wait','Searching Sources', '' )
if Addon.getSetting("trailer_dp")=="true" and Addon.getSetting("new_server_dp")=="false":
pDialog = xbmcgui.DialogProgressBG()
pDialog.create('Collecting')
#pDialog.update(0, message=' Please Wait ')
if len(episode)==1:
episode_n="0"+episode
else:
episode_n=episode
if len(season)==1:
season_n="0"+season
else:
season_n=season
if Addon.getSetting("lang")=="1":
lang='en'
else:
lang='he'
url2=None
thread=[]
tv_mode=tv_movie
original_title=original_title.replace('%3a','')
all_sources=[]
if tv_movie=='movie':
fav_server_en=Addon.getSetting("fav_servers_en")
fav_servers=Addon.getSetting("fav_servers")
else:
fav_server_en=Addon.getSetting("fav_servers_en_tv")
fav_servers=Addon.getSetting("fav_servers_tv")
onlyfiles=[]
all_mag_s=[]
for f in listdir(mag_dir):
if isfile(join(mag_dir, f)):
all_mag_s.append(f)
if not ((Addon.getSetting("all_t")=='1' and Addon.getSetting("magnet")=='true') or only_torrent=='yes'):
onlyfiles = [f for f in listdir(done_dir) if isfile(join(done_dir, f))]
if Addon.getSetting("magnet")=='true' or only_torrent=='yes':
onlyfiles=onlyfiles+[f for f in listdir(mag_dir) if isfile(join(mag_dir, f))]
all_fv_servers=[]
if fav_status=='true' or fav_status=='rest':
if fav_server_en=='true':
all_fv_servers=fav_servers.split(',')
all_direct=[]
all_google=[]
all_rapid=[]
z=0
for items in onlyfiles:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles))),items,1,'')
if items !='cache.py' and items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cloudflare.py' and items!='Addon.py':
impmodule = __import__(items.replace('.py',''))
if items in all_mag_s:
type=['magnet']
else:
type=[]
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting("magnet")=='true' and Addon.getSetting(items.replace('.py',''))=="true" and 'magnet' in type:
all_fv_servers.append(items.replace('.py',''))
z+=1
'''
if Addon.getSetting("rdsource")=='true':
onlyfiles2 = [f for f in listdir(rd_dir) if isfile(join(rd_dir, f))]
f_result={}
name_check=''
z=0
for items in onlyfiles2:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles2))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles2))),items,1,'')
if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cloudflare.py' and items!='Addon.py':
if fav_status=='true':
if items.replace('.py','') not in all_fv_servers:
continue
elif fav_status=='rest':
if items.replace('.py','') in all_fv_servers:
continue
impmodule = __import__(items.replace('.py',''))
type=['rd']
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting(items.replace('.py',''))=="true":
if name_check!='':
if items.replace('.py','')==name_check:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
else:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
z+=1
'''
else:
all_fv_servers=[]
if (Addon.getSetting("all_t")=='1' or only_torrent=='yes') and Addon.getSetting("magnet")=='true':
all_fv_servers=[]
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(0, 'Please Wait','Collecting', '' )
f_result={}
regular_s=True
if Addon.getSetting("rdsource")=='true' and Addon.getSetting("rd_only")=='true':
regular_s=False
if (Addon.getSetting("all_t")=='1' or only_torrent=='yes') and Addon.getSetting("magnet")=='true':
regular_s=False
if regular_s:
name_check=''
z=0
for items in onlyfiles:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles))),items,1,'')
if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cache.py' and items!='Addon.py':
if fav_status=='true':
if items.replace('.py','') not in all_fv_servers:
continue
elif fav_status=='rest':
if items.replace('.py','') in all_fv_servers:
continue
impmodule = __import__(items.replace('.py',''))
if items in all_mag_s:
type=['magnet']
else:
type=[]
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting("magnet")=='false' and 'magnet' in type:
continue
if Addon.getSetting(items.replace('.py',''))=="true" or (Addon.getSetting("magnet")=='true' and ('magnet' in type) and Addon.getSetting(items.replace('.py',''))=="true"):
if name_check!='':
if items.replace('.py','')==name_check:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
else:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
z+=1
if Addon.getSetting("rdsource")=='true':
onlyfiles = [f for f in listdir(rd_dir) if isfile(join(rd_dir, f))]
name_check=''
z=0
for items in onlyfiles:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles))),items,1,'')
if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cloudflare.py' and items!='Addon.py':
if fav_status=='true':
if items.replace('.py','') not in all_fv_servers:
continue
elif fav_status=='rest':
if items.replace('.py','') in all_fv_servers:
continue
impmodule = __import__(items.replace('.py',''))
type=['rd']
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting(items.replace('.py',''))=="true":
if name_check!='':
if items.replace('.py','')==name_check:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
else:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
else:
if Addon.getSetting("rdsource")=='true':
onlyfiles = [f for f in listdir(rd_dir) if isfile(join(rd_dir, f))]
f_result={}
name_check=''
z=0
for items in onlyfiles:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles))),items,1,'')
if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cloudflare.py' and items!='Addon.py':
if fav_status=='true':
if items.replace('.py','') not in all_fv_servers:
continue
elif fav_status=='rest':
if items.replace('.py','') in all_fv_servers:
continue
impmodule = __import__(items.replace('.py',''))
type=['rd']
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting(items.replace('.py',''))=="true":
if name_check!='':
if items.replace('.py','')==name_check:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
else:
if tv_movie=='movie' and 'movie' in type:
all_sources.append((items.replace('.py',''),impmodule))
elif tv_movie=='tv' and 'tv' in type:
all_sources.append((items.replace('.py',''),impmodule))
z+=1
if (Addon.getSetting("all_t")=='1' and Addon.getSetting("magnet")=='true') or only_torrent=='yes' :
name_check=''
#onlyfiles = [f for f in listdir(done_dir) if isfile(join(done_dir, f))]
onlyfiles=[f for f in listdir(mag_dir) if isfile(join(mag_dir, f))]
z=0
for items in onlyfiles:
if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl_temp.py' and items!='cloudflare.py' and items!='Addon.py':
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int((z*100.0)/(len(onlyfiles))), 'Please Wait','Collecting', items )
all_s_in=({},int((z*100.0)/(len(onlyfiles))),items,1,'')
impmodule = __import__(items.replace('.py',''))
type=['torrent']
type,source_scraper=get_type(impmodule,items.replace('.py',''))
if Addon.getSetting(items.replace('.py',''))=="true":
all_sources.append((items,impmodule))
z+=1
all_s_in=({},100,'',1,'')
for name1,items in all_sources:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(0, 'Please Wait','Searching', name1 )
if name_check!='':
if name1==name_check:
thread.append(Thread(get_links_new,hostDict,imdb_id,name1,type,items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id,premiered,False))
#thread.append(Thread(items.get_links,tv_movie,original_title,heb_name,season_n,episode_n,season,episode,show_original_year,id))
thread[len(thread)-1].setName(name1)
else:
thread.append(Thread(get_links_new,hostDict,imdb_id,name1,type,items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id,premiered,False))
#thread.append(Thread(items.get_links,tv_movie,original_title,heb_name,season_n,episode_n,season,episode,show_original_year,id))
thread[len(thread)-1].setName(name1)
if Addon.getSetting("trailer_dp")=="true" and Addon.getSetting("new_server_dp")=="false":
thread.append(Thread(play_trailer_f(id,tv_mode)))
thread[len(thread)-1].setName('Trailer')
if Addon.getSetting("subtitles")=='true':
thread.append(Thread(get_subs_trd,imdb_id,season,episode))
thread[len(thread)-1].setName('Subs')
start_time = time.time()
stop_all=0
zzz=0
for td in thread:
td.start()
if Addon.getSetting("server_test_one")=='true':
while td.is_alive():
elapsed_time = time.time() - start_time
dp.update(int(((zzz* 100.0)/(len(thread))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),td.name, 'Waiting')
xbmc.sleep(1000)
if dp.iscanceled():
dp.close()
return 0
zzz+=1
if Addon.getSetting("server_test_one")=='true':
return 0
if fav_status=='true' and Addon.getSetting("fav_search_time_en"):
max_time=int(Addon.getSetting("fav_search_time"))
else:
max_time=int(Addon.getSetting("time_s"))
num_live=0
tt={}
for i in range (0,(len(thread)+50)):
tt[i]="red"
string_dp=''
string_dp2=''
still_alive=0
if len(thread)==0:
xbmcgui.Dialog().ok('Error Occurred','[COLOR aqua][I] No servers were Found [/I][/COLOR]')
all_links_togther={}
check_lk=[]
while 1:
num_live=0
elapsed_time = time.time() - start_time
if 1:#for threads in thread:
elapsed_time = time.time() - start_time
num_live=0
string_dp=''
string_dp2=''
still_alive=0
count_2160=0
count_1080=0
count_720=0
count_480=0
count_rest=0
count_alive=0
all_alive={}
for yy in range(0,len(thread)):
all_alive[thread[yy].name]=thread[yy].is_alive()
if not thread[yy].is_alive():
num_live=num_live+1
tt[yy]="lightgreen"
else:
if string_dp2=='':
string_dp2=thread[yy].name
else:
count_alive+=1
string_dp2=string_dp2+','+thread[yy].name
still_alive=1
tt[yy]="red"
save_name=''
all_links_togther=all_links_sources
f_result=all_links_sources
living=[]
for items in all_alive:
if all_alive[items]:
living.append(items)
if count_alive>10:
string_dp2='Remaining Sources: '+str(count_alive)+' - '+random.choice (living)
count_found=0
try:
for data in f_result:
#for data in all_links_togther['links']:
if len (all_links_sources)>0:
count_found+=1
if 'links' in f_result[data] and len (f_result[data]['links'])>0 and data!='subs':
for links_in in f_result[data]['links']:
name1,links,server,res=links_in
new_res=0
if '2160' in res or '4k' in res.lower():
count_2160+=1
new_res=2160
if '1080' in res:
count_1080+=1
new_res=1080
elif '720' in res:
count_720+=1
new_res=720
elif '480' in res:
count_480+=1
new_res=480
else:
count_rest+=1
new_res=0
check_super=False
if 'magnet:' in links and allow_debrid:
check_super=True
#logging.warning('In magnet')
if Addon.getSetting("super_fast")=="true" and links not in check_lk and check_super and once_fast_play==0 and silent_mode==False:
check_lk.append(links)
check_r_l,str_check=get_condition(name1,links,server,new_res,tv_movie,f_result,data,original_title)
logging.warning(data)
logging.warning(check_r_l)
logging.warning(str_check)
f_ur=False
if check_r_l :
logging.warning('IN play')
ur=links
try:
f_ur=check_cached(links)
except Exception as e:
logging.warning('bad link in super:'+str(e)+' '+ur)
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Bad Torrent in Super:'+str(e))).encode('utf-8'))
global_result='Bad Source Try Manually'
logging.warning('f_ur:'+str(f_ur))
if f_ur:
plot='-'+data+'-'
global_result='[COLOR gold][I][B] Playing '+data+'-'+str_check+'[/B][/I][/COLOR]'
try:
xbmc.Player().stop()
xbmc.sleep(100)
once_fast_play=1
if Addon.getSetting("new_window_type2")!='3':
play(name,ur,' ',' ',plot,show_original_year,season,episode,original_title,name1,heb_name,show_original_year,eng_name,'0',original_title,id)
else:
play(name1,ur,' ',' ',plot,show_original_year,season,episode,original_title,name1,heb_name,show_original_year,eng_name,isr,original_title,id,windows_play=False,auto_fast=True,auto_play=True,f_auto_play=True)
except Exception as e:
#once_fast_play=0
logging.warning('bad link in super2:'+str(e)+' '+ur)
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'bad link in super:'+str(e))).encode('utf-8'))
global_result='Bad Source Try Manually'
except:
pass
global_result="4K: [COLOR gold]%s[/COLOR] 1080: [COLOR khaki]%s[/COLOR] 720: [COLOR gold]%s[/COLOR] 480: [COLOR silver]%s[/COLOR] Rest: [COLOR burlywood]%s[/COLOR]"%(count_2160,count_1080,count_720,count_480,count_rest)
if Addon.getSetting("trailer_dp")=="true" and Addon.getSetting("new_server_dp")=="false":
string_dp="4K: [COLOR gold]%s[/COLOR] 1080: [COLOR khaki]%s[/COLOR] 720: [COLOR gold]%s[/COLOR] 480: [COLOR silver]%s[/COLOR] Rest: [COLOR burlywood]%s[/COLOR]"%(count_2160,count_1080,count_720,count_480,count_rest)
pDialog.update(int(((num_live* 100.0)/(len(thread))) ), message=time.strftime("%H:%M:%S", time.gmtime(elapsed_time))+' '+string_dp)
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
total=count_1080+count_720+count_480+count_rest
string_dp="4K: [COLOR gold]%s[/COLOR] 1080: [COLOR khaki]%s[/COLOR] 720: [COLOR gold]%s[/COLOR] 480: [COLOR silver]%s[/COLOR] Rest: [COLOR burlywood]%s[/COLOR] T: [COLOR darksalmon]%s[/COLOR] ' '[COLOR gold]SF: %s[/COLOR]' '[COLOR lightcoral]SN: %s[/COLOR]'"%(count_2160,count_1080,count_720,count_480,count_rest,total,str(count_found),len(f_result)-count_found)
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int(((num_live* 100.0)/(len(thread))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),string_dp, string_dp2)
all_s_in=(f_result,int(((num_live* 100.0)/(len(thread))) ),string_dp2.replace('Remaining Sources: ',''),2,string_dp)
xbmc.sleep(100)
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
if dp.iscanceled():
dp_c=True
else:
dp_c=False
else:
dp_c=False
if dp_c or elapsed_time>max_time or stop_window:
stop_all=1
#for name1,items in all_sources:
# items.stop_all=1
logging.warning('Stoping NOW')
num_live2=0
for threads in thread:
all_s_in=(f_result,int(((num_live2* 100.0)/(len(thread))) ),'Closing',2,threads.name)
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int(((num_live2* 100.0)/(len(thread))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Closing', threads.name)
all_s_in=(f_result,int(((num_live2* 100.0)/(len(thread))) ),'Closing',2,threads.name)
if threads.is_alive():
threads._Thread__stop()
num_live2+=1
break
if still_alive==0:
break
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
if dp.iscanceled():
dp_c=True
else:
dp_c=False
else:
dp_c=False
if dp_c or elapsed_time>max_time or stop_window:
logging.warning('Stoping NOW 2')
for name1,items in all_sources:
items.stop_all=1
num_live2=0
for threads in thread:
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int(((num_live2* 100.0)/(len(thread))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Closing', threads.name)
all_s_in=(f_result,int(((num_live2* 100.0)/(len(thread))) ),'Closing',2,threads.name)
if threads.is_alive():
threads._Thread__stop()
num_live2+=1
break
xbmc.sleep(500)
counter=0
while 1:
alive=0
stop_all=1
count_all=len(threading.enumerate())
num_live2=0
for thread in threading.enumerate():
elapsed_time = time.time() - start_time
if (thread.isAlive()):
alive=1
thread._Thread__stop()
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.update(int(((num_live2* 100.0)/(count_all)) ), ' Please Wait2 '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Closing', thread.getName()+' - '+str(counter))
all_s_in=(f_result,int(((num_live2* 100.0)/(count_all)) ),'Closing2',2,thread.getName()+' - '+str(counter))
if alive==0 or counter>10:
break
counter+=1
xbmc.sleep(200)
if Addon.getSetting("trailer_dp")=="true" and Addon.getSetting("new_server_dp")=="false":
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Done Searching')).encode('utf-8'))
pDialog.close()
all_links_fp=[]
all_pre=[]
z=0
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.close()
f_subs=[]
return f_result,all_links_fp,all_pre,f_subs
except Exception as e:
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Line:'+str(lineno)+' E:'+str(e))).encode('utf-8'))
logging.warning('ERROR IN Looking sources:'+str(lineno))
logging.warning('inline:'+line)
logging.warning(e)
logging.warning('BAD Looking Sources')
if Addon.getSetting("server_dp")=='true' and silent_mode==False:
dp.close()
def filter_servers(url):
    """Decide whether a source link should be filtered out.

    Each enabled "re_*" addon setting contributes one verdict to ``skip``:
    True when the url does NOT match that setting's host (candidate for
    removal), False when it matches (keep).  The final result is the AND of
    all verdicts, so a link is removed only when it matches none of the
    enabled host filters.

    url: host/server string to examine (compared case-insensitively).
    Returns True when the link should be removed, False to keep it.
    """
    url = url.lower()
    skip = []
    if Addon.getSetting("re_rapid") == 'true':
        if 'rapidvideo' not in url:
            skip.append(True)
        else:
            skip.append(False)
    if Addon.getSetting("re_google") == 'true':
        if 'google' not in url and 'gdrive' not in url:
            skip.append(True)
        else:
            skip.append(False)
    if Addon.getSetting("re_direct") == 'true':
        if 'direct' not in url and 'sratim-il' not in url and 'cdn' not in url:
            skip.append(True)
        else:
            skip.append(False)
    if Addon.getSetting("re_open") == 'true':
        if 'openload' not in url and 'oload' not in url:
            skip.append(True)
        else:
            skip.append(False)
    if Addon.getSetting("re_stream") == 'true':
        if 'streamango' not in url:
            skip.append(True)
        else:
            # BUGFIX: previously no verdict was appended on a match, so a
            # streamango link could still be removed when this was the only
            # enabled filter.  Append False like every sibling branch.
            skip.append(False)
    if Addon.getSetting("re_magnet") == 'true':
        if '{p-' not in url:
            skip.append(True)
        else:
            skip.append(False)
    if Addon.getSetting("re_vidc") == 'true':
        if 'vidcloud' not in url:
            skip.append(True)
        else:
            skip.append(False)
    result = True
    for verdict in skip:
        result = result & verdict
    return result
def get_rest_s(time_to_save, name, year, original_title, season, episode, id, eng_name, show_original_year, heb_name, isr, get_local):
    """Background pass that fetches the remaining ("rest") sources.

    Switches the module into silent mode (no progress dialogs), waits the
    user-configured delay so the favourite-servers search can run first, then
    performs a full cached source search with fav_status='rest'.  Results are
    stored in the 'pages' cache table by cache.get; the unpacked values are
    not needed here, so the previous dead locals (the ``t`` list and the
    four unpacked result variables) have been removed.

    Always returns 0.
    """
    global silent_mode
    silent_mode = True  # suppress progress dialogs for this background search
    time_to_wait_for_rest = int(Addon.getSetting("time_to_wait_for_rest"))
    time.sleep(time_to_wait_for_rest)
    # NOTE(review): original_title is deliberately passed for both the name
    # and original_title slots, matching the other call sites of c_get_sources.
    cache.get(c_get_sources, time_to_save, original_title, year, original_title,
              season, episode, id, eng_name, show_original_year, heb_name, isr,
              get_local, 'rest', 'no', '0', table='pages')
    return 0
def get_torrents(url):
    """Identity passthrough: return the given torrent url unchanged."""
    return url
def resolver_supported():
    """List the resolver plugin names bundled with script.module.resolveurl.

    Scans the resolveurl plugins directory and returns every plugin file
    name without its '.py' extension, skipping compiled (.pyo/.pyc) and
    dunder files.
    """
    plugins_dir = os.path.join(xbmc.translatePath("special://home"), "addons",
                               "script.module.resolveurl", "lib", "resolveurl", "plugins")
    return [entry.replace('.py', '')
            for entry in listdir(plugins_dir)
            if isfile(join(plugins_dir, entry))
            and ".pyo" not in entry and ".pyc" not in entry and '__' not in entry]
def get_rd_servers():
    """Return the list of hoster domains usable through Real-Debrid.

    When debrid is enabled, queries the Real-Debrid API for the relevant
    hosters; an empty answer triggers a one-time re-authentication and a
    second attempt.  On any failure, or when the answer is still empty, a
    static fallback list is used.  A few premium hosts are always appended
    at the end regardless of which list was chosen.
    """
    domains = []
    if allow_debrid:
        try:
            import real_debrid
            client = real_debrid.RealDebrid()
            domains = client.getRelevantHosters()
            if len(domains) == 0:
                # An empty answer usually means a stale token; clear the
                # client id, re-authenticate once and retry.
                Addon.setSetting('rd.client_id', '')
                client.auth()
                client = real_debrid.RealDebrid()
                domains = client.getRelevantHosters()
        except Exception as e:
            logging.warning(e)
    if len(domains) == 0:
        # Static fallback list of known Real-Debrid-supported hosters.
        domains = [u'4shared.com', u'rapidgator.net', u'sky.fm', u'1fichier.com', u'depositfiles.com', u'hitfile.net', u'filerio.com', u'solidfiles.com', u'mega.co.nz', u'scribd.com', u'flashx.tv', u'canalplus.fr', u'dailymotion.com', u'salefiles.com', u'youtube.com', u'faststore.org', u'turbobit.net', u'big4shared.com', u'filefactory.com', u'youporn.com', u'oboom.com', u'vimeo.com', u'redtube.com', u'zippyshare.com', u'file.al', u'clicknupload.me', u'soundcloud.com', u'gigapeta.com', u'datafilehost.com', u'datei.to', u'rutube.ru', u'load.to', u'sendspace.com', u'vidoza.net', u'tusfiles.net', u'unibytes.com', u'ulozto.net', u'hulkshare.com', u'dl.free.fr', u'streamcherry.com', u'mediafire.com', u'vk.com', u'uploaded.net', u'userscloud.com', u'nitroflare.com']
    # Always-available premium hosts (appended even when the API answered).
    domains.append('nitroflare.com')
    domains.append('rapidgator.net')
    domains.append('uploadgig.com')
    return domains
def undo_get_rest_data(full_str):
    """Parse a serialized plugin parameter string back into its fields.

    Mirror of get_rest_data: extracts every known query parameter, applying
    URL-decoding only to the fields that were quoted on the way in, and
    substituting a per-field default when a key is absent or unreadable.

    Returns a 25-tuple:
    (url, name, iconimage, mode, fanart, description, data, original_title,
     id, season, episode, tmdbid, eng_name, show_original_year, heb_name,
     isr, saved_name, prev_name, dates, data1, fast_link, fav_status,
     only_torrent, only_heb_servers, new_windows_only)
    """
    params = get_custom_params(full_str)
    for key in params:
        params[key] = params[key].replace(" ", "%20")

    def _raw(key, default):
        # Raw parameter value; default when the key is missing.
        try:
            return params[key]
        except:
            return default

    def _unq(key, default):
        # URL-decoded parameter value; default when missing/undecodable.
        try:
            return urllib.unquote_plus(params[key])
        except:
            return default

    url = _unq("url", None)
    name = _unq("name", None)
    iconimage = _unq("iconimage", None)
    try:
        mode = int(params["mode"])
    except:
        mode = None
    fanart = _unq("fanart", None)
    try:
        # description is the only field re-encoded before unquoting.
        description = urllib.unquote_plus(params["description"].encode('utf-8'))
    except:
        description = ' '
    data = _unq("data", 0)
    original_title = _raw("original_title", ' ')
    id = _raw("id", ' ')
    season = _raw("season", "%20")
    episode = _raw("episode", "%20")
    tmdbid = _raw("tmdbid", ' ')
    eng_name = _raw("eng_name", ' ')
    show_original_year = _raw("show_original_year", 0)
    heb_name = _unq("heb_name", ' ')
    isr = _raw("isr", ' ')
    try:
        saved_name = clean_name(params["saved_name"], 1)
    except:
        saved_name = ' '
    prev_name = _raw("prev_name", ' ')
    dates = _raw("dates", ' ')
    data1 = _raw("data1", '[]')
    fast_link = _unq("fast_link", '')
    fav_status = _raw("fav_status", 'false')
    only_torrent = _raw("only_torrent", 'no')
    only_heb_servers = _raw("only_heb_servers", '0')
    try:
        new_windows_only = params["new_windows_only"] == "true"
    except:
        new_windows_only = False
    return url, name, iconimage, mode, fanart, description, data, original_title, id, season, episode, tmdbid, eng_name, show_original_year, heb_name, isr, saved_name, prev_name, dates, data1, fast_link, fav_status, only_torrent, only_heb_servers, new_windows_only
def get_rest_data(name, url, mode, iconimage, fanart, description, video_info={}, data=' ', original_title=' ', id=' ', season=' ', episode=' ', tmdbid=' ', eng_name=' ', show_original_year=' ', rating=0, heb_name=' ', isr=' ', generes=' ', trailer=' ', dates=' ', watched='no', fav_status='false'):
    """Serialize item metadata into a plugin:// callback URL string.

    The query string is assembled in the same four segments as before; the
    previously duplicated builder code in the except branch has been
    factored into a single private helper that is retried once after the
    Python 2 default-encoding fallback kicks in.

    Note: many parameters (video_info, rating, generes, trailer, watched)
    are accepted for signature compatibility but are not serialized.
    Returns the assembled URL string.
    """
    # '|' is the addon's field separator downstream, so strip it here.
    name = name.replace("|", ' ')
    description = description.replace("|", ' ')

    def _build():
        # Assemble the callback query string (unquoted fields must match
        # the decoding rules in undo_get_rest_data).
        te1 = sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode2="+str(mode)
        te2 = "&name="+(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description.encode('utf8'))+"&heb_name="+(heb_name)+"&dates="+(dates)
        te3 = "&data="+str(data)+"&original_title="+(original_title)+"&id="+(id)+"&season="+str(season)
        te4 = "&episode="+str(episode)+"&tmdbid="+str(tmdbid)+"&eng_name="+(eng_name)+"&show_original_year="+(show_original_year)+"&isr="+str(isr)
        return te1 + te2 + te3 + te4.decode('utf8')+"&fav_status="+fav_status

    try:
        u = _build()
    except:
        # Py2 encoding fallback: force utf-8 as the default encoding and
        # retry the exact same assembly once.
        reload(sys)
        sys.setdefaultencoding('utf8')
        u = _build()
    return u
def get_sources(name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates='',data1='[]',get_local=False,fast_link='',fav_status='false',only_torrent='no',only_heb_servers='0',new_windows_only=False,metaliq='false'):
global imdb_global,all_s_in,close_on_error,close_sources_now,once_fast_play
import urlparse
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
once_fast_play=0
o_plot=plot
rd_domains=[]
logging.warning(name+'-'+id)
logging.warning(isr)
logging.warning('isr3:'+isr)
if allow_debrid:
rd_domains=cache.get(get_rd_servers, 72, table='pages')
#rd_domains=requests.get('https://api.real-debrid.com/rest/1.0/hosts/domains').json()
if season!=None and season!="%20":
tv_movie='tv'
else:
tv_movie='movie'
try:
if 'tt' in id:
url3='https://api.themoviedb.org/3/find/%s?api_key=1248868d7003f60f2386595db98455ef&language=en-US&external_source=imdb_id'%id
xx=requests.get(url3).json()
if tv_movie=='tv':
if len(xx['tv_results'])>0:
id=str(xx['tv_results'][0]['id'])
else:
if len(xx['movie_results'])>0:
id=str(xx['movie_results'][0]['id'])
except Exception as e:
logging.warning(e)
pass
if '-Episode ' in plot and '-NEXTUP-' not in plot:
all_d=json.loads(urllib.unquote_plus(dates))
if len(all_d)<2:
all_d=['','','']
if all_d[0]==0:
choise=['Play Next Episode - '+all_d[2],'Play Current Episode - '+all_d[1],'Open Season Episodes','Open Season Selection']
elif all_d[2]==0:
choise=['Play Current Episode - '+all_d[1],'Play Previous Episode - '+all_d[0],'Open Season Episodes','Open Season Selection']
else:
if 'magenta' not in all_d[2]:
choise=['Play Next Episode - '+all_d[2],'Play Current Episode - '+all_d[1],'Play Previous Episode - '+all_d[0],'Open Season Episodes','Open Season Selection']
else:
choise=['[COLOR magenta]'+'Play Next Episode - '+'[/COLOR]'+all_d[2],'Play Current Episode - '+all_d[1],'Play Previous Episode - '+all_d[0],'Open Season Episodes','Open Season Selection']
if Addon.getSetting("tv_ep_window")=='true':
menu=[]
menu = Chose_ep('plugin.video.destinyds', heb_name,name,id,season,episode,dates,original_title)
menu.doModal()
ret = menu.params
del menu
else:
ret = xbmcgui.Dialog().select("Choose Episode", choise)
if ret!=-1:
if all_d[2]==0 or all_d[0]==0:
prev_index=1
else:
prev_index=2
if ret==0 and all_d[2]!=0:
episode=str(int(episode)+1)
from tmdb import get_episode_data
name,plot,image=get_episode_data(id,season,episode)
o_plot='Season %s Episode %s \n'%(season,episode)+plot
if ret==prev_index:
if int(episode)>1:
episode=str(int(episode)-1)
from tmdb import get_episode_data
name,plot,image=get_episode_data(id,season,episode)
o_plot='Season %s Episode %s \n'%(season,episode)+plot
if ret==(prev_index+1):
plot=plot.replace('-Episode ','')
xbmc.executebuiltin(('Container.update("plugin://plugin.video.destinyds/?name=%s&url=%s&iconimage=%s&fanart=%s&description=%s&data=%s&original_title=%s&id=%s&season=%s&tmdbid=%s&show_original_year=%s&heb_name=%s&isr=%s&mode2=8",return)'%(name,urllib.quote_plus(url),icon,image,urllib.quote_plus(plot),year,original_title,id,season,tmdbid,show_original_year,heb_name,isr)))
'''
get_episode(name,url,iconimage,image,plot,data,original_title,id,season,tmdbid,show_original_year,heb_name,isr)
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
return 0
'''
return 'ok',[]
sys.exit()
if ret==(prev_index+2):
plot=plot.replace('-Episode ','')
logging.warning('OPEN LAST')
xbmc.executebuiltin(('Container.update("plugin://plugin.video.destinyds/?name=%s&url=%s&iconimage=%s&fanart=%s&description=%s&data=%s&original_title=%s&id=%s&season=%s&tmdbid=%s&show_original_year=%s&heb_name=%s&isr=%s&mode2=7"),return'%(name,urllib.quote_plus(url),icon,image,urllib.quote_plus(plot),year,original_title,id,season,tmdbid,show_original_year,heb_name,isr)))
'''
get_seasons(name,url,iconimage,image,plot,data,original_title,id,heb_name,isr)
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
xbmcplugin.endOfDirectory(int(sys.argv[1]))
return 0
'''
return 'ok',[]
sys.exit()
else:
sys.exit()
return 'ENDALL',[]
if len(episode)==1:
episode_n="0"+episode
else:
episode_n=episode
if len(season)==1:
season_n="0"+season
else:
season_n=season
logging.warning('2')
time_to_save=int(Addon.getSetting("save_time"))
search_done=0
#all_f_links=c_get_sources(name,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local=False,fav_status=fav_status,only_torrent=only_torrent,only_heb_servers=only_heb_servers)
#logging.warning(all_f_links)
#sys.exit()
if 'Filtered sources' in name:
filter_mode=True
else:
filter_mode=False
if 'Rest of Results' in name:
filter_loc='rest'
rest_test=' Rest of Results '
else:
filter_loc='rest2'
rest_test=''
all_d_new=[]
logging.warning('3')
all_d_new.append((name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates,data1))
if tv_movie=='movie':
fav_search_f=Addon.getSetting("fav_search_f")
fav_servers_en=Addon.getSetting("fav_servers_en")
fav_servers=Addon.getSetting("fav_servers")
else:
fav_search_f=Addon.getSetting("fav_search_f_tv")
fav_servers_en=Addon.getSetting("fav_servers_en_tv")
fav_servers=Addon.getSetting("fav_servers_tv")
logging.warning('4')
if fav_status!='rest':
if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0):
fav_status='true'
else:
fav_status='false'
name=name.replace('[COLOR red]','').replace('[COLOR white]','').replace('[/COLOR]','')
o_year=year
if plot==None:
plot=' '
if 'NEXTUP' in plot:
nextup=True
else:
nextup=False
o_name=name
try:
if season!=None and season!="%20":
name=original_title
d=[]
d.append((name,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local,fav_status,only_torrent,only_heb_servers))
all_f_links,all_links_fp,all_pre,f_subs= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local,fav_status,only_torrent,only_heb_servers, table='pages')
if tv_movie=='tv':
fav_first=Addon.getSetting("fav_search_rest_tv")
else:
fav_first=Addon.getSetting("fav_search_rest")
rest_of_data=[]
rest_found=0
if fav_status=='true' and Addon.getSetting("all_t")!='1' and only_torrent!='yes' and 'Magnet links' not in o_name :
found_links=0
for name_f in all_f_links:
if found_links==1:
break
if name_f!='subs' :
for name,link,server,quality in all_f_links[name_f]['links']:
found_links=1
break
if found_links==0 and fav_status=='true':
all_f_links,all_links_fp,all_pre,f_subs= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local,'rest','no','0', table='pages')
rest_found=1
elif fav_status=='true' and fav_first=='true':
rest_of_data.append((time_to_save, original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local))
#thread[0].start()
a=1
except Exception as e:
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
logging.warning('ERROR IN Sources Search:'+str(lineno))
logging.warning('inline:'+line)
logging.warning(e)
logging.warning('BAD Sources Search')
xbmcgui.Dialog().ok('Error Occurred',' Sources Search Error '+str(e))
close_sources_now=1
return 0
xbmcgui.Dialog().ok('Error Occurred',' Cache was Cleaned...Try Again '+str(e))
logging.warning('5')
next_ep=[]
if Addon.getSetting("dp")=='true' and silent_mode==False:
dp = xbmcgui . DialogProgress ( )
dp.create('Please Wait','Ordering Sources', '','')
dp.update(0, 'Please Wait','Ordering Sources', '' )
all_s_in=({},0,'Ordering Sources',2,'')
start_time=time.time()
if get_local==False:
if season!=None and season!="%20":
episode1=str(int(episode)+1)
if len(episode1)==1:
episode_n1="0"+episode1
else:
episode_n1=episode1
from tmdb import get_episode_data
name1,plot1,image1=get_episode_data(id,season,episode1)
if name1!=' ':
f_name=''
addDir3( f_name+'[COLOR gold][I]Open Next Episode - %s[/I][/COLOR]'%episode1, url,4,icon,image1,plot1+'-NEXTUP-',data=year,original_title=original_title,season=season,episode=episode1,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates)
next_ep.append(get_rest_data( f_name+'[COLOR gold][I]Open Next Episode - %s[/I][/COLOR]'%episode1, url,4,icon,image1,plot1+'-NEXTUP-',data=year,original_title=original_title,season=season,episode=episode1,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates))
#xbmc.Player().stop()
logging.warning('6')
all_data=[]
video_data={}
video_data['title']=name
video_data['poster']=image
video_data['plot']=plot
video_data['icon']=icon
video_data['year']=year
if plot==None:
plot=' '
if Addon.getSetting("lang")=="1":
lang='en'
else:
lang='he'
url2=None
save_fav(id,tv_movie)
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Updating DB', '')
all_s_in=({},0,'Updating DB',2,'')
if tv_movie=='tv':
dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' and type='%s' and season='%s' and episode='%s'"%(original_title.replace("'"," "),tv_movie,season,episode))
match = dbcur.fetchone()
logging.warning('hislink')
logging.warning(match)
if match==None:
dbcur.execute("INSERT INTO AllData Values ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (name.replace("'"," "),url,icon,image,plot.replace("'"," "),year,original_title.replace("'"," "),season,episode,id,eng_name.replace("'"," "),show_original_year,heb_name.replace("'"," "),isr,tv_movie))
dbcon.commit()
dbcur.execute("SELECT * FROM Lastepisode WHERE original_title = '%s' and type='%s'"%(original_title.replace("'"," "),tv_movie))
match = dbcur.fetchone()
if match==None:
dbcur.execute("INSERT INTO Lastepisode Values ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (name.replace("'"," "),url,icon,image,plot.replace("'"," "),year,original_title.replace("'"," "),season,episode,id,eng_name.replace("'"," "),show_original_year,heb_name.replace("'"," "),isr,tv_movie))
dbcon.commit()
else:
dbcur.execute("SELECT * FROM Lastepisode WHERE original_title = '%s' and type='%s' and season='%s' and episode='%s'"%(original_title.replace("'"," "),tv_movie,season,episode))
match = dbcur.fetchone()
if match==None:
dbcur.execute("UPDATE Lastepisode SET season='%s',episode='%s',image='%s',isr='%s' WHERE original_title = '%s' and type='%s'"%(season,episode,image,isr,original_title.replace("'"," "),tv_movie))
dbcon.commit()
#if nextup==False:
# xbmc.executebuiltin('Container.Refresh')
else:
dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' and type='%s'"%(original_title.replace("'"," "),tv_movie))
match = dbcur.fetchone()
logging.warning('hislink')
logging.warning(match)
if match==None:
dbcur.execute("INSERT INTO AllData Values ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (name.replace("'"," "),url,icon,image,plot.replace("'"," "),year,original_title.replace("'"," "),season,episode,id,eng_name.replace("'"," "),show_original_year,heb_name.replace("'"," "),isr,tv_movie))
dbcon.commit()
logging.warning('Done hislink')
#else:
# dbcur.execute("UPDATE AllData SET season='%s',episode='%s' WHERE original_title = '%s' and type='%s'"%(season,episode,original_title,tv_movie))
# dbcon.commit()
plot_o1=plot
logging.warning('7')
all_only_heb={}
count_n=0
count_t=0
all_rd_s={}
all_torrent_s={}
all_rd_servers=[]
count_r=0
all_hebdub_servers=[]
all_removed=[]
all_lk=[]
if filter_mode:
dbcur.execute("SELECT * FROM %s"%filter_loc)
all_new = dbcur.fetchone()[0].decode('base64')
all_new=json.loads(all_new)
all_t_links=[]
all_heb_links=[]
duplicated=0
logging.warning('8')
all_mag={}
all_mag[0]=[]
all_lk2=[]
counter_hash=0
r_list=[]
page_index=0
checked_cached=0
if Addon.getSetting("check_cached")=='true' and allow_debrid:
try:
for name_f in all_f_links:
if name_f!='subs' :
for name,link,server,quality in all_f_links[name_f]['links']:
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Updating Links, Check Cached', name)
if link not in all_lk2:
all_lk2.append(link)
if 'magnet' in link:
try:
hash = str(re.findall(r'btih:(.*?)&', link)[0].lower())
except:
hash =link.split('btih:')[1]
all_mag[page_index].append(hash)
counter_hash+=1
if counter_hash>150:
page_index+=1
all_mag[page_index]=[]
counter_hash=0
logging.warning('all_mag:'+str(len(all_mag)))
all_hased=[]
logging.warning(page_index)
import real_debrid
rd = real_debrid.RealDebrid()
for items in all_mag:
hashCheck = rd.checkHash(all_mag[items])
for hash in hashCheck:
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Updating Links, Check Cached', hash)
if 'rd' in hashCheck[hash]:
if len(hashCheck[hash]['rd'])>0:
all_hased.append(hash)
for name_f in all_f_links:
index=0
if name_f!='subs' :
for name,link,server,quality in all_f_links[name_f]['links']:
if 'magnet' in link:
try:
hash = str(re.findall(r'btih:(.*?)&', link)[0].lower())
except:
hash =link.split('btih:')[1]
if hash in all_hased:
all_f_links[name_f]['links'][index]=['Cached '+name,link,server,quality]
index+=1
checked_cached=0
except Exception as e:
checked_cached=0
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
xbmc.executebuiltin(u'Notification(%s,%s)' % ('Destiny of Deathstar', 'ERROR IN Cached Test:'+str(lineno)))
logging.warning('ERROR IN Cached Test:'+str(lineno))
logging.warning('inline:'+line)
logging.warning(e)
logging.warning('BAD Cached Test')
for name_f in all_f_links:
if name_f!='subs' :
for name,link,server,quality in all_f_links[name_f]['links']:
if checked_cached==1 and Addon.getSetting("check_cached_r")=='true' and allow_debrid:
if 'magnet' in link and 'Cached ' not in name:
continue
if Addon.getSetting("shrink")=='true':
if link in all_lk:
duplicated+=1
continue
else:
all_lk.append(link)
if filter_mode:
if link not in all_new:
continue
else:
if "," in Addon.getSetting("unfilter"):
unfilter=Addon.getSetting("unfilter").split(",")
else:
if len(Addon.getSetting("unfilter"))>0:
unfilter=[Addon.getSetting("unfilter")]
else:
unfilter=[]
if Addon.getSetting("remove_all")=='true' and (name_f not in unfilter):
check=filter_servers(server)
if check:
all_removed.append(link)
continue
fixed_q=fix_q(quality)
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Updating Links, Duplicated - %s'%str(duplicated), name)
all_s_in=({},0,' Duplicated - %s'%str(duplicated),2,name)
se='-%s-'%name_f
if all_f_links[name_f]['torrent']==True:
if link in all_t_links:
continue
all_t_links.append(link)
all_torrent_s[count_t]={}
all_torrent_s[count_t]['name']=name.decode('utf-8','ignore')
all_torrent_s[count_t]['link']=link.decode('utf-8','ignore')
all_torrent_s[count_t]['server']=server.decode('utf-8','ignore')
all_torrent_s[count_t]['quality']=quality.decode('utf-8','ignore')
all_torrent_s[count_t]['icon']=icon.decode('utf-8','ignore')
all_torrent_s[count_t]['image']=image.decode('utf-8','ignore')
all_torrent_s[count_t]['plot']='[COLOR gold]'+server.decode('utf-8','ignore')+'[/COLOR]\n'+plot.decode('utf-8','ignore')
all_torrent_s[count_t]['year']=year.decode('utf-8','ignore')
all_torrent_s[count_t]['season']=season.decode('utf-8','ignore')
all_torrent_s[count_t]['episode']=episode.decode('utf-8','ignore')
all_torrent_s[count_t]['id']=id
all_torrent_s[count_t]['name_f']=name_f
#all_torrent_s[count_t]['color']=all_f_links[name_f]['color']
count_t+=1
elif all_f_links[name_f]['rd']==True:
all_rd_s[count_r]={}
all_rd_s[count_r]['name']=name
all_rd_s[count_r]['link']=link
all_rd_s[count_r]['server']=server
all_rd_s[count_r]['quality']=quality
all_rd_s[count_r]['icon']=icon
all_rd_s[count_r]['image']=image
all_rd_s[count_r]['plot']=plot
all_rd_s[count_r]['year']=year
all_rd_s[count_r]['season']=season
all_rd_s[count_r]['episode']=episode
all_rd_s[count_r]['id']=id
all_rd_s[count_r]['name_f']=name_f
#all_rd_s[count_r]['color']=all_f_links[name_f]['color']
count_r+=1
#pre=all_pre[all_links_fp.index(link)]
if name_f not in all_rd_servers:
all_rd_servers.append(name_f)
else:
plot=plot_o1
#pre=all_pre[all_links_fp.index(link)]
check=False
if 1:
pre=''
if '-magnet-' in server:
se=' magnet '+se
color='gold'
else:
color='white'
all_data.append(('[COLOR %s][%s] '%(color,name_f)+name.decode('utf8','ignore')+" - "+server+'[/COLOR]'+' sss '+name_f+' sss ', str(link),icon,image,plot,show_original_year,quality,se,fixed_q,name,pre,server))
logging.warning('9')
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Ordering Links', ' ')
all_s_in=({},0,'Ordering Links',2,'')
if Addon.getSetting("order_torrents_new")=='true' and (Addon.getSetting("all_t")=='1' or only_torrent=='yes' or 'Magnet links' in o_name) and (Addon.getSetting("magnet")=='true' or only_torrent=='yes'):
regex='{P-(.+?)/S-(.+?)}'
all_data2=[]
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_data:
peers=0
seeds=0
seeds_pre=re.compile(regex).findall(name)
if len(seeds_pre)>0:
seeds=seeds_pre[0][1].replace(' ','')
peers=re.compile(regex).findall(name)[0][0].replace(' ','')
seeds=seeds.replace(',','')
peers=peers.replace(',','')
try:
a=int(seeds)
except:
seeds=0
all_data2.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,int(seeds),peers,supplier))
all_data2=sorted(all_data2, key=lambda x: x[11], reverse=True)
all_data=[]
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,seeds,peers,supplier in all_data2:
all_data.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier))
else:
all_fv=[]
all_rest=[]
if Addon.getSetting("fav_servers_en")=='true' and tv_movie=='movie':
all_fv_servers=Addon.getSetting("fav_servers").split(',')
elif Addon.getSetting("fav_servers_en_tv")=='true' and tv_movie=='tv':
all_fv_servers=Addon.getSetting("fav_servers_tv").split(',')
else:
all_fv_servers=[]
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_data:
if server.replace('-','') in all_fv_servers:
all_fv.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier))
else:
all_rest.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier))
all_fv=sorted(all_fv, key=lambda x: x[8], reverse=False)
all_rest=sorted(all_rest, key=lambda x: x[8], reverse=False)
all_data=[]
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_fv:
all_data.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier))
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_rest:
all_data.append((name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier))
logging.warning('10')
if get_local==True:
return all_data,rest_of_data
all_in=[]
if imdb_global==None:
imdb_global=id
else:
if 'tt' not in imdb_global:
imdb_global=id
magnet_ofresult=''
if Addon.getSetting("magnet")=='true' and filter_loc!='rest':
if (Addon.getSetting("all_t")=='2' and only_torrent!='yes' )and 'Magnet Links' not in o_name :
dbcur.execute("DELETE FROM torrents")
dbcon.commit()
dbcur.execute("INSERT INTO torrents Values ('%s')"%(json.dumps(all_torrent_s).encode('base64')))
dbcon.commit()
magnet_ofresult=get_rest_data( '[COLOR aqua][I]Magnet Links -(%s)[/I][/COLOR]'%len(all_t_links), 'torrents',4,icon,image,plot+'-NEXTUP-',data=o_year,original_title=original_title,season=season,episode=episode,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates,fav_status=fav_status)
#addDir3( '[COLOR aqua][I]Magnet links -(%s)[/I][/COLOR]'%len(all_torrent_s), 'torrents',21,icon,image,plot,data=year,original_title=json.dumps(all_subs),season=season,episode=episode,id=imdb_global)
#all_d_new.append((name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates,data1))
addDir3( '[COLOR aqua][I]Magnet Links -(%s)[/I][/COLOR]'%len(all_t_links), 'torrents',4,icon,image,plot,data=o_year,original_title=original_title,season=season,episode=episode,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates,fav_status=fav_status)
#elif (Addon.getSetting("all_t")=='1' or only_torrent=='yes') :
# play_by_subs('[COLOR aqua][I]Magnet links -(%s)[/I][/COLOR]'%len(all_torrent_s),json.dumps(all_torrent_s),icon,image,name.decode('utf8','ignore')+plot.decode('utf8','ignore'),year,json.dumps(all_subs),season,episode,imdb_global,'','',original_title,one_list=True)
playingUrlsList = []
t=0
'''
if Addon.getSetting("auto_enable")=='true':
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_data:
t=t+1
if plot==None:
plot=' '
playingUrlsList.append(link+'$$$$$$$'+server+'$$$$$$$'+q+'$$$$$$$'+saved_name+'$$$$$$$'+'[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot)
'''
logging.warning('11')
if Addon.getSetting("chapi_enable")=='true' :
if season!=None and season!="%20":
url2='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,'1248868d7003f60f2386595db98455ef')
else:
url2='http://api.themoviedb.org/3/movie/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,'1248868d7003f60f2386595db98455ef')
try:
imdb_id=requests.get(url2).json()['external_ids']['imdb_id']
except:
imdb_id=" "
url_ch=''
if season!=None and season!="%20":
url_pre='http://thetvdb.com/api/GetSeriesByRemoteID.php?imdbid=%s&language=en'%imdb_id.replace('tt','')
html2=requests.get(url_pre).content
pre_tvdb = str(html2).split('<seriesid>')
if len(pre_tvdb) > 1:
tvdb = str(pre_tvdb[1]).split('</seriesid>')
url_ch='plugin://plugin.video.chappaai/tv/play/%s/%s/%s/library'%(tvdb[0],season,episode)
else:
url_ch=('plugin://plugin.video.%s/movies/play/imdb/%s/library'%(Addon.getSetting("metaliq_version_for_s"),imdb_id))
if url_ch!='':
addLink( "Open in Metalliq",url_ch,41,False,' ',' ',"Open in Metalliq")
if Addon.getSetting("auto_enable")=='true':
addLink( 'Auto Play', json.dumps(playingUrlsList),6,False,icon,image,plot,data=year,original_title=original_title.replace("%20"," "),season=season,episode=episode,id=id,saved_name=original_title,prev_name=o_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year,isr=isr)
if Addon.getSetting("debugmode")=='true':
try:
a=1
except:
pass
rest_ofresult=[]
rd_ofresult=[]
if fav_status=='true' and rest_found==0 and only_torrent!='yes' and 'Magnet Links' not in o_name :
rest_ofresult=get_rest_data('Rest of Results',all_d_new[0][1],4,all_d_new[0][2],all_d_new[0][3],all_d_new[0][4]+'-NEXTUP-',data=all_d_new[0][5],original_title=all_d_new[0][6],season=all_d_new[0][7],episode=all_d_new[0][8],id=all_d_new[0][9],heb_name=all_d_new[0][12],eng_name=all_d_new[0][10],show_original_year=all_d_new[0][11],isr=all_d_new[0][13],dates=all_d_new[0][14],fav_status='rest')
addDir3('Rest of Results',all_d_new[0][1],4,all_d_new[0][2],all_d_new[0][3],all_d_new[0][4]+'-NEXTUP-',data=all_d_new[0][5],original_title=all_d_new[0][6],season=all_d_new[0][7],episode=all_d_new[0][8],id=all_d_new[0][9],heb_name=all_d_new[0][12],eng_name=all_d_new[0][10],show_original_year=all_d_new[0][11],isr=all_d_new[0][13],dates=all_d_new[0][14],fav_status='rest')
if Addon.getSetting("remove_all")=='true' and len(all_removed)>0 and filter_mode==False:
dbcur.execute("DELETE FROM %s"%filter_loc)
dbcon.commit()
dbcur.execute("INSERT INTO %s Values ('%s')"%(filter_loc,json.dumps(all_removed).encode('base64')))
dbcon.commit()
addDir3('Filtered Sources -(%s)'%len(all_removed)+rest_test,all_d_new[0][1],4,all_d_new[0][2],all_d_new[0][3],all_d_new[0][4],data=all_d_new[0][5],original_title=all_d_new[0][6],season=all_d_new[0][7],episode=all_d_new[0][8],id=all_d_new[0][9],heb_name=all_d_new[0][12],eng_name=all_d_new[0][10],show_original_year=all_d_new[0][11],isr=all_d_new[0][13],dates=all_d_new[0][14],fav_status=fav_status)
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Playlist.GetItems", "params": { "properties": [ "showlink", "showtitle", "season", "title", "artist" ], "playlistid": 1}, "id": 1}')
j_list=json.loads(result)
if 'RD Links' not in o_name and Addon.getSetting("rd_menu_enable")=='true' and Addon.getSetting("rdsource")=='true':
addDir3( 'RD Links', 'RD Links',4,icon,image,plot,data=o_year,original_title=original_title,season=season,episode=episode,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates,fav_status=fav_status)
rd_ofresult=get_rest_data( 'RD Links', 'RD Links',4,icon,image,plot+'-NEXTUP-',data=o_year,original_title=original_title,season=season,episode=episode,id=id,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=dates,fav_status=fav_status)
if Addon.getSetting("new_source_menu")=='false':
if (Addon.getSetting("fast_play2_tv")=='true' and tv_movie=='tv') or (Addon.getSetting("fast_play2_movie")=='true' and tv_movie=='movie'):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
if (Addon.getSetting("fast_play2_tv")=='true' and tv_movie=='tv') and Addon.getSetting("new_source_menu")=='false':
addDir3( 'Season Episodes','www',102,icon,image,plot,data=year,original_title=original_title,id=id,season=season,tmdbid=id,show_original_year=year,heb_name=heb_name,isr=isr)
once=0
all_lists=[]
f_link2=''
m=[]
n=[]
n_magnet=[]
if len(rest_ofresult)>0:
n.append(rest_ofresult)
if len(magnet_ofresult)>0:
n_magnet.append(magnet_ofresult)
r_results=[]
if len(rd_ofresult)>0:
r_results.append(rd_ofresult)
count_magnet=0
all_items=[]
f_plot=''
max_q='99'
max_q_t=['2160','1080','720','480','360']
if Addon.getSetting("auto_q_source")=='true':
if tv_movie=='tv':
max_q=Addon.getSetting("max_quality_t")
else:
max_q=Addon.getSetting("max_quality_m")
max_q_v=max_q_t[int(max_q)]
logging.warning('12')
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier in all_data:
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Loading links - Duplicates - %s'%str(duplicated), name)
all_s_in=({},0,'Loading links - Duplicates - %s'%str(duplicated),2,name)
if server==None:
server=' '
q=q.replace('p','').replace('4K','2160').replace('4k','2160')
try:
a=int(q)
except:
q='0'
if max_q!='99':
if int(q)>int(max_q_v):
continue
if q==None:
q=' '
if plot==None:
plot=' '
name=name.replace("|"," ").replace(" "," ").replace("\n","").replace("\r","").replace("\t","").strip()
if fast_link!='':
if link==fast_link:
all_f_data=((name,fast_link,icon,image,'[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot,o_year,season,episode,original_title,heb_name,show_original_year,eng_name,isr,id))
f_link2=('%s?name=%s&mode2=5&url=%s&data=%s&season=%s&episode=%s&original_title=%s&saved_name=%s&heb_name=%s&show_original_year=%s&eng_name=%s&isr=%s&id=%s&description=%s&iconimage=%s&fanart=%s'%(sys.argv[0],name,urllib.quote_plus(fast_link),o_year,season,episode,original_title,name,heb_name,show_original_year,eng_name,isr,id,urllib.quote_plus(('[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot).encode('utf8')),icon,image))
if 1:#(Addon.getSetting("fast_play2_tv")=='true' and tv_movie=='tv') or (Addon.getSetting("fast_play2_movie")=='true' and tv_movie=='movie'):
#if Addon.getSetting("new_source_menu")=='false':
link2=('%s?name=%s&mode=5&url=%s&data=%s&season=%s&episode=%s&original_title=%s&saved_name=%s&heb_name=%s&show_original_year=%s&eng_name=%s&isr=%s&id=%s&description=%s'%(sys.argv[0],name,urllib.quote_plus(link),o_year,season,episode,original_title,name,heb_name,show_original_year,eng_name,isr,id,urllib.quote_plus(('[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot).encode('utf8'))))
added=''
listItem=xbmcgui.ListItem(added+'[COLOR gold]'+str(q)+'[/COLOR]|[COLOR magenta]'+server+'[/COLOR]|[COLOR gold]'+supplier.replace("Openload","vumoo")+'|[/COLOR]'+clean_name(name,1), iconImage=icon, thumbnailImage=image,path=link2)
listItem.setInfo('video', {'Title': name})
playlist.add(url=link2, listitem=listItem)
all_lists.append(listItem)
if 'RD Links' in o_name:
if 'magnet' not in server:
try:
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
if host not in rd_domains:
continue
except:
pass
if 'Magnet links' in o_name and 'magnet' not in server:
continue
size='0'
if 'magnet' in server:
saved_name=name.split('{P')[0]
regex=' -.+?- (.+?) GB'
try:
size=re.compile(regex).findall(name)[0]
#size=size.split('-')[2]
except:
size='0'
max_size=int(Addon.getSetting("size_limit"))
try:
size=re.findall("[-+]?\d*\.\d+|[-+]?\d+", size)[0]
if float(size)>max_size:
continue
except:
pass
regex='{P-(.+?)/S-(.+?)}'
try:
seeds=re.compile(regex).findall(name)[0][1].replace(' ','')
peers=re.compile(regex).findall(name)[0][0].replace(' ','')
try:
s=int(seeds)
except:
seeds='0'
try:
s=int(peers)
except:
peers='0'
seeds=seeds.replace(',','')
peers=peers.replace(',','')
except:
peers='0'
seeds='0'
pass
if int(seeds)>=int(Addon.getSetting("min_seed")):
if peers=='0' and seeds=='0':
server='magnet - [COLOR lightgreen]%sGB[/COLOR]-'%(size)
else:
server='magnet - S%s - [COLOR lightgreen]%sGB[/COLOR]-'%(seeds,size)
count_magnet+=1
else:
continue
tes_mag=re.compile('- P(.+?)/S(.+?) -').findall(server)
if ('magnet' in server or len(tes_mag)>0) and (Addon.getSetting("all_t")=='2' ) and Addon.getSetting("magnet")=='true' and 'Magnet links' not in o_name :
if only_torrent!='yes':
continue
if len(Addon.getSetting("ignore_ser"))>0:
ignore_server=Addon.getSetting("ignore_ser").split(",")
ignore=0
for items in ignore_server:
if items.lower() in name.lower():
ignore=1
break
if ignore==1:
continue
#if Addon.getSetting("rd_menu_enable")=='true' and Addon.getSetting("rdsource")=='true' and 'RD SOURCE' in server:
# continue
if ('magnet:' in link and allow_debrid):
server='[COLOR gold] ☻ RD ☻ '+server+'[/COLOR]'
if allow_debrid and '//' in link:
try:
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
if host in rd_domains:
server='[COLOR gold] ☻ RD ☻ '+server+'[/COLOR]'
except:
pass
pre=0
if 1:
#regex='\] (.+?)-'
#o_name1=re.compile(regex).findall(name)[0].replace('%20','.').replace(' ','.')
if Addon.getSetting("source_sim")=='true':
if pre==0:
n1='[COLOR lightblue]'+server+'[/COLOR] '+'[COLOR lightgreen]◄'+q+'►[/COLOR]'
else:
n1='[COLOR gold][I]'+str(pre)+'%[/I][/COLOR] '+'[COLOR lightblue]'+server+'[/COLOR] '+'[COLOR lightgreen]◄'+q+'►[/COLOR]'
p1=name+'\n[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot
else:
if pre==0:
n1= name
else:
n1='[COLOR gold][I]'+str(pre)+'%[/I][/COLOR]-'+ name
p1='[COLOR gold]'+q+'[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot
if only_torrent=='yes':
name=remove_color(name)
name=name.replace('magnet_','').replace('.py','')
n1=('[COLOR gold]'+str(pre)+'%[/COLOR] [COLOR gold]◄'+q+'►[/COLOR][COLOR lightblue][/COLOR] '+name)
if ((Addon.getSetting("new_source_menu")=='true' and only_torrent!='yes' ) or new_windows_only) and f_link2=='':
name=name
p1=p1
m.append((name,link,icon,image,p1,show_original_year,q,server,q,saved_name,pre,supplier,size+' GB'))
f_plot=p1
all_items.append(addLink(n1, link,5,False,icon,image,p1,data=o_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=o_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year,collect_all=True,isr=isr))
else:
all_items.append(addLink(n1, link,5,False,icon,image,p1,data=o_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=o_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year,collect_all=True,isr=isr))
else:
if only_torrent=='yes':
name=remove_color(name)
name=('[COLOR gold]'+str(pre)+'%[/COLOR] [COLOR gold] ◄'+q+'► [/COLOR][COLOR lightblue] [/COLOR] '+name+'$$$$$$$'+link)
if ((Addon.getSetting("new_source_menu")=='true' and only_torrent!='yes') or new_windows_only) and f_link2=='':
name=name
f_plot='[COLOR lightgreen]◄'+q+'►[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot
f_plot=f_plot
m.append((name,link,icon,image,f_plot,show_original_year,q,server,q,saved_name,pre,supplier,size+' GB'))
all_items.append(addLink( name, link,5,False,icon,image,'[COLOR lightgreen]◄'+q+'►[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot,data=o_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=o_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year,collect_all=True,isr=isr))
else:
all_items.append(addLink( name, link,5,False,icon,image,'[COLOR lightgreen]◄'+q+'►[/COLOR]\n[COLOR lightblue]'+server+'[/COLOR]\n'+plot,data=o_year,original_title=original_title,season=season,episode=episode,id=id,saved_name=saved_name,prev_name=o_name,eng_name=eng_name,heb_name=heb_name,show_original_year=show_original_year,collect_all=True,isr=isr))
logging.warning('13')
if new_windows_only==False or f_link2!='' or only_torrent=='yes' or once_fast_play==1:
xbmcplugin .addDirectoryItems(int(sys.argv[1]),all_items,len(all_items))
all_s_in=( {},100 ,'',4,'')
if once_fast_play>0:
close_sources_now=1
if Addon.getSetting("dp")=='true' and silent_mode==False:
dp.close()
if ((Addon.getSetting("fast_play2_tv")=='true' and tv_movie=='tv') or (Addon.getSetting("fast_play2_movie")=='true' and tv_movie=='movie')) and Addon.getSetting("new_source_menu")=='false':
a=1
elif f_link2!='':
logging.warning('PLAY MEDIA')
#xbmc.executebuiltin(('XBMC.PlayMedia("%s")'%f_link2))
name,fast_link,iconimage,image,description,data,season,episode,original_title,heb_name,show_original_year,eng_name,isr,id=all_f_data
play(name,fast_link,iconimage,image,description,data,season,episode,original_title,name,heb_name,show_original_year,eng_name,isr,original_title,id,windows_play=True,auto_fast=False,nextup=True)
logging.warning('DONE PLAY MEDIA')
return 990,rest_of_data
search_done=1
check=False
if (tv_movie=='tv' and Addon.getSetting("video_in_sources_tv")=='true') or (tv_movie=='movie' and Addon.getSetting("video_in_sources")=='true'):
check=True
if (Addon.getSetting("trailer_wait")=='true' and Addon.getSetting("trailer_dp")=='true') or (Addon.getSetting("video_in_s_wait")=='true' and check and Addon.getSetting("new_server_dp")=='true'):
while xbmc.Player().isPlaying():
xbmc.sleep(100)
if Addon.getSetting("torrent_warning")=='true' and Addon.getSetting("magnet")=='true' and Addon.getSetting("rdsource")=='false':
xbmcgui.Dialog().ok('Warning', 'Using TORRENTS without RD or VPN is not recommended in some countries\n this warning can be disabled in the settings')
logging.warning('once_fast_play33:'+str(once_fast_play))
check_show=False
if not xbmc.Player().isPlaying:
check_show=True
elif once_fast_play==0:
check_show=True
if (Addon.getSetting("new_source_menu")=='true' and only_torrent!='yes' and check_show ) or new_windows_only :
if f_link2=='':
if len(dates)>0 and dates!='%20' and dates!='"%20"' and metaliq=='false':
l=get_rest_data('Series Tracker'.decode('utf8'),'tv',32,domain_s+'pbs.twimg.com/profile_images/873323586622078976/Z0BfwrYm.jpg',' ','Watched series'.decode('utf8'),isr=isr)
xbmc.executebuiltin('Container.Refresh(%s)'%l)
#xbmc.executebuiltin('Container.Refresh()')
res=new_show_sources(m,o_year,o_plot,eng_name,episode,image,heb_name,icon,id,prev_name,original_title,season,show_original_year,n,rest_of_data,n_magnet,r_results,str(count_magnet),next_ep,str(len(all_heb_links)),only_torrent,isr)
if res=='END':
return 'ENDALL',[]
status_pl='0'
logging.warning('14')
#sys.exit()
if 'items' in (j_list['result']):
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Playlist.Clear", "params": { "playlistid": 0 }, "id": 1}')
if (Addon.getSetting("fast_play2_tv")=='true' and tv_movie=='tv') or (Addon.getSetting("fast_play2_movie")=='true' and tv_movie=='movie'):
if Addon.getSetting("new_source_menu")=='false' and check_show==False:
if 'items' in (j_list['result']) and once_fast_play==0:
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Playlist.Clear", "params": { "playlistid": 0 }, "id": 1}')
if Addon.getSetting("new_source_menu")=='false':
if Addon.getSetting("dp")=='true' and silent_mode==False:
dp.close()
xbmc.Player().stop()
if not 'items' in (j_list['result']):
xbmc.Player().play(playlist,windowed=False)
if Addon.getSetting("src_disp")=='false':
status_pl='ENDALL'
#ok=xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=all_lists[0])
if Addon.getSetting("dp")=='true' and silent_mode==False:
dp.close()
if Addon.getSetting("dp")=='true' and silent_mode==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Enjoy', ' ')
if Addon.getSetting("dp")=='true' and silent_mode==False:
dp.close()
return status_pl,rest_of_data
def auto_play(name,urls,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id):
    """Try each candidate stream in *urls* until one starts playing.

    urls is a JSON list of '$$$$$$$'-separated records in the form
    link / server / quality / name / urlencoded-plot. Each non-Sdarot
    candidate is handed to play(); once playback is confirmed the
    progress dialog is closed and the function exits. Failures are
    logged and reported on the progress dialog, then the next link
    is tried. The remaining parameters are passed through to play().
    """
    year=show_original_year
    image=fanart
    plot=description
    icon=iconimage
    z=0
    all_links=json.loads(urls)
    is_playing=False
    for link in all_links:
        if is_playing:
            break
        # Each record packs link|server|quality|name|urlencoded-plot.
        parts=link.split("$$$$$$$")
        server=parts[1]
        q=parts[2]
        name=parts[3]
        plot=urllib.unquote_plus(parts[4].decode('utf8'))
        link=parts[0]
        try:
            if '-Sdarot' not in plot:
                r=play(name,link,iconimage,fanart,plot,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,auto_play=True)
                if r=='ok':
                    while not xbmc.Player().isPlaying():
                        xbmc.sleep(100) #wait until video is being played
                    time.sleep(5)
                    if xbmc.Player().isPlaying():
                        mode2=1999
                        xbmc.executebuiltin('Dialog.Close(okdialog, true)')
                        # BUGFIX: was 'is_playing==True', a no-op comparison;
                        # playback succeeded, so mark it and stop trying links.
                        is_playing=True
                        sys.exit()
        except Exception as e:
            logging.warning(e)
            if Addon.getSetting("dp")=='true' and silent_mode==False:
                # BUGFIX: percent was z/(len*100.0), which truncates to 0;
                # report real progress in the 0-100 range instead.
                dp.update(int(z*100.0/len(all_links)),str(server)+"-"+q,str(z)+'/'+str(len(all_links)),str(e))
        z=z+1
class HeadRequest(urllib2.Request):
    """urllib2.Request subclass whose HTTP verb is always HEAD.

    Lets callers resolve redirects without downloading the body.
    """

    def get_method(self):
        # urllib2 consults get_method() to choose the verb; force HEAD.
        return "HEAD"
def get_redirect(url):
    """Resolve *url* to its final location using an HTTP HEAD request.

    Only attempted on Kodi 17+; on older versions, or on any failure
    (network error, bad URL, missing globals), the original url is
    returned unchanged.
    """
    try:
        if KODI_VERSION < 17:
            return url
        response = urllib2.urlopen(HeadRequest(url))
        return response.geturl()
    except:
        # Best effort only - fall back to the unresolved url.
        return url
def get_imdb_data(info,name_o,image,source,type):
    """Resolve title metadata for a parsed release name via the TMDB API.

    info    -- dict from PTN.parse(); 'title' and 'year' keys are used
               when present.
    name_o  -- raw release name, used as the fallback title.
    image   -- fallback icon/fanart URL.
    source  -- list source tag; 'jen2' skips the remote lookup.
    type    -- TMDB endpoint type: 'movie' or 'tv'.

    Returns (name, imdb_id, icon, fanart, plot, rating, genere); fallback
    values are returned on lookup failure instead of raising.
    """
    tmdbKey = '1248868d7003f60f2386595db98455ef'
    name=name_o
    imdb_id=''
    icon=image
    fanart=image
    plot=''
    rating=''
    genere=' '
    # Lookup is disabled for 'jen2' sources or when jen_tmdb is turned off.
    check=False
    if source=='jen2':
        check=True
    elif Addon.getSetting("jen_tmdb")=='false':
        check=True
    if check:
        return name,imdb_id,icon,fanart,plot,rating,genere
    # BUGFIX: the original bound 'a=info["title"]' in only one branch and then
    # executed 'a=a' unconditionally, raising NameError whenever 'title' was
    # absent from info. Just ensure a usable title exists.
    if 'title' not in info or len(info['title'])==0:
        info['title']=name_o.replace('.',' ')
    if 'year' in info:
        tmdb_data="https://api.tmdb.org/3/search/%s?api_key=%s&query=%s&year=%s&language=en&append_to_response=external_ids"%(type,tmdbKey,urllib.quote_plus(info['title']),info['year'])
    else:
        tmdb_data="https://api.tmdb.org/3/search/%s?api_key=%s&query=%s&language=en&append_to_response=external_ids"%(type,tmdbKey,urllib.quote_plus(info['title']))
    all_data=requests.get(tmdb_data).json()
    if 'results' in all_data and len(all_data['results'])>0:
        if (all_data['results'][0]['id'])!=None:
            url='https://api.themoviedb.org/3/%s/%s?api_key=%s&language=en&append_to_response=external_ids'%(type,all_data['results'][0]['id'],tmdbKey)
            all_d2={}  # BUGFIX: must exist even when the details request fails
            try:
                all_d2=requests.get(url).json()
                imdb_id=all_d2['external_ids']['imdb_id']
            except:
                imdb_id=" "
            genres_list= []
            if 'genres' in all_d2:
                for g in all_d2['genres']:
                    genres_list.append(g['name'])
            try:genere = u' / '.join(genres_list)
            except:genere=''
            try:
                # Movies expose 'title', TV shows expose 'name'.
                if 'title' in all_data['results'][0]:
                    name=all_data['results'][0]['title']
                else:
                    name=all_data['results'][0]['name']
                rating=all_data['results'][0]['vote_average']
                try:
                    icon=domain_s+'image.tmdb.org/t/p/original/'+all_data['results'][0]['poster_path']
                    fanart=domain_s+'image.tmdb.org/t/p/original/'+all_data['results'][0]['backdrop_path']
                except:
                    pass
                plot=all_data['results'][0]['overview']
            except Exception as e:
                logging.warning(e)
                name=info['title']
                fanart=' '
                icon=' '
                plot=' '
    else:
        # No usable search results: fall back to the raw name and image.
        name=name_o
        fanart=image
        icon=image
        plot=' '
    return name,imdb_id,icon,fanart,plot,rating,genere
def get_qu(url):
    """Populate the directory with the cached 4K catalogue.

    Reads every row from the local '4k.db' cache, decodes the
    base64+gzip-packed stream link, pulls metadata from the stored JSON
    blob (falling back to the row name when the blob is malformed) and
    adds one playable entry per row, plus title/year/rating sort methods.
    """
    def _inflate(packed):
        # Stream links are stored as base64-encoded gzip data.
        import StringIO ,gzip
        buf = StringIO.StringIO()
        buf.write(packed.decode('base64'))
        # Rewind so GzipFile reads the stream from the beginning.
        buf.seek(0)
        return gzip.GzipFile(fileobj=buf, mode='rb').read()
    db_path = os.path.join(tmdb_data_dir, '4k.db')
    conn_4k = database.connect(db_path)
    cur_4k = conn_4k.cursor()
    conn_4k.commit()
    cur_4k.execute("SELECT * FROM MyTable")
    rows = cur_4k.fetchall()
    for index,name,link,icon,fanart,plot,data,date,year,genre,father,type in rows:
        # Normalise the stored blob into parseable JSON before loading it.
        data=data.replace('[',' ').replace(']',' ').replace(' ','').replace("\\"," ").replace(': """",',': "" "",').replace(': """"}',': "" ""}').replace(': "",',': " ",').replace(': ""}',': " "}').replace('""','"').replace('\n','').replace('\r','')
        try:
            meta=json.loads(data)
            original_title=meta['originaltitle']
            imdb_id=meta['imdb']
            rating=meta['rating']
            generes=meta['genre']
        except:
            # Malformed blob: fall back to bare row data.
            original_title=name
            imdb_id=" "
            rating=" "
            generes=" "
        addLink( name, _inflate(link),5,False,icon,fanart,'[COLOR gold]'+'4K'+'[/COLOR]\n[COLOR lightblue]'+'-NEW K-'+'[/COLOR]\n'+plot,data=year,original_title=original_title,id=imdb_id,rating=rating,generes=generes,show_original_year=year,saved_name=name)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
def fix_name(name_o):
    """Strip Kodi colour/format markup and scene tags from a release name
    and normalise separators to dots."""
    # Drop every opening [COLOR ...] tag; closing tags are handled below.
    for tag_body in re.compile('\[COLOR(.+?)\]').findall(name_o):
        name_o = name_o.replace('[COLOR%s]' % tag_body, '')
    # Kill leftover markup/keywords, then dot-separate the result.
    name_o = (name_o.replace('=', ' ')
              .replace('[B]', '').replace('[/B]', '')
              .replace('silver', '').replace('lightseagreen', '')
              .replace('[', '').replace(']', '')
              .replace('/COLOR', '').replace('COLOR', '')
              .replace('4k', '').replace('4K', '')
              .strip()
              .replace('(', '.').replace(')', '.')
              .replace(' ', '.').replace('..', '.'))
    return name_o
def get_data(i,url2,headers):
    """Fetch an RSS/XML feed and store (title, link(s), thumbnail) tuples
    in the shared `matches` dict under slot *i*.

    Multiple <link> tags within one <item> are joined with '$$$'.
    Returns the parsed list, or [] on any error (logged as 'Bad Jen').
    """
    global matches
    try:
        page=requests.get(url2,headers=headers,timeout=3).content
        parsed=[]
        for item in re.compile('<item>(.+?)</item>',re.DOTALL).findall(page):
            links=re.compile('<link>(.+?)</link',re.DOTALL).findall(item)
            if len(links)==0:
                # An item without a link is useless - skip it.
                continue
            if len(links)>1:
                joined='$$$'.join(links)
            else:
                joined=links[0]
            titles=re.compile('<title>(.+?)</title',re.DOTALL).findall(item)
            if len(titles)==0:
                continue
            title=titles[0]
            thumbs=re.compile('<thumbnail>http(.+?)</',re.DOTALL).findall(item)
            if len(thumbs)>0:
                thumb=thumbs[0]
            else:
                thumb=' '
            parsed.append((title,joined,thumb))
        matches[i]=parsed
        return matches[i]
    except Exception as e:
        logging.warning(e)
        logging.warning('Bad Jen')
        logging.warning(url2)
        return []
def get_next_jen(url,icon,fanart):
    """Render the next level of a Jen list using the new_jen backend."""
    from new_jen import get_list
    get_list(url,icon,fanart)
def get_jen_list(url,icon,fan):
    """Build the Kodi directory for Jen pastebin category *url*.

    Reads every configured "Pastebin-<url>-<i>" setting (i = 1..39) and
    hands each list to ``new_jen.get_list`` together with *icon* and the
    per-category fanart (falling back to a stock Jen image).

    NOTE(review): the ``return 0`` right after that loop makes everything
    below it unreachable.  It is the pre-``new_jen`` implementation kept
    as dead code; it references ``z``, ``dp`` and ``count`` which are
    never initialised here, so it could not run as-is anyway.
    """
    global matches
    #from jen import check_jen_categroys
    from new_jen import get_list
    start_time = time.time()
    all_links_in=[]
    headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.5',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'X-Requested-With': 'XMLHttpRequest',
    'Connection': 'keep-alive',
    }
    stop_all=0
    matches={}
    thread=[]
    all_lists=[]
    all_jen_lisy=[]
    for i in range (1,40):
        if stop_all==1:
            break
        url2=Addon.getSetting("Pastebin-%s-"%url+str(i))
        #if 'http' not in url2:
        #    continue
        if Addon.getSetting("jen_fan_cat-"+str(i)):
            fan=Addon.getSetting("jen_fan_cat-"+str(i))
        else:
            fan='https://koditips.com/wp-content/uploads/jen-kodi.jpg'
        get_list(url2,icon,fan)
    return 0
    # ------------------------------------------------------------------
    # Unreachable legacy implementation below -- see docstring.
    # ------------------------------------------------------------------
    for td in thread:
        td.start()
    # Poll worker threads until none are alive, optionally showing progress.
    while 1:
        num_live=0
        still_alive=0
        string_dp2=''
        for threads in thread:
            count=0
        for yy in range(0,len(thread)):
            if not thread[yy].is_alive():
                num_live=num_live+1
            else:
                still_alive=1
                string_dp2=thread[yy].name
        if Addon.getSetting("jen_progress")=='true':
            dp.update(0, 'Slow the First Time. Please Wait ',string_dp2, string_dp2)
        if still_alive==0:
            break
    season=' '
    # Load the metadata cache so already-seen titles skip the TMDB lookup.
    dbcur_tmdb.execute("SELECT * FROM tmdb_data")
    match4 = dbcur_tmdb.fetchall()
    all_eng_name={}
    for eng_name,imdb_id,icon,fanart,plot,rating,name,generes in match4:
        all_eng_name[name.replace('é','e')]=[]
        all_eng_name[name.replace('é','e')].append((eng_name,imdb_id,icon,fanart,plot,rating,name,generes))
    for match in matches:
        if stop_all==1:
            break
        for name_o,link,image in matches[match]:
            image='http'+image.strip().replace('\n','').replace(' ','').replace('\r','').replace('\t','')
            # Filter out premium-host links unless real-debrid is enabled.
            check=False
            if Addon.getSetting("rdsource")=='true':
                check=True
            elif '1fichier.com' not in link and 'glasford.ddns.net' not in link and 'http://dl.my-film.in/' not in link and 'http://dl.my-film.org/' not in link and 'debrid' not in name_o.lower():
                check=True
            if check:
                name_o=fix_name(name_o).strip()
                name_o=name_o.replace('Real.Debrid.Only','')
                if name_o.endswith('.'):
                    name_o=name_o[:-1]
                info=(PTN.parse(name_o))
                info['title']=info['title'].replace('-',' ').replace(' ','.')
                if 'year' in info:
                    name_o=name_o.replace(str(info['year']),'').strip()
                    if name_o.endswith('.'):
                        name_o=name_o[:-1]
                # 'rest' collects every parsed attribute except the title
                # (year, resolution, group...) for the display label.
                rest=''
                for keys in info:
                    if keys!='title':
                        rest=rest+' '+str(info[keys])
                count=count+1
                imdb_id=' '
                rating=' '
                year_n='0'
                if 'year' in info:
                    year_n=info['year']
                if 'Season ' in link:
                    type='tv'
                else:
                    type='movie'
                if Addon.getSetting("jen_progress")=='true':
                    if dp.iscanceled():
                        dp.close()
                        stop_all=1
                        break
                try:
                    items=all_eng_name[name_o.replace("'"," ").replace('é','e').replace('’',' ')][0]
                except:
                    items=None
                if items==None:
                    # Cache miss: resolve metadata and persist it.
                    name,imdb_id,icon,fanart,plot,rating,generes=get_imdb_data(info,name_o,image,'jen',type)
                    if Addon.getSetting("jen_tmdb")=='true':
                        try:
                            dbcur_tmdb.execute("INSERT INTO tmdb_data Values ('%s', '%s', '%s', '%s', '%s', '%s','%s','%s');" % (name.replace("'"," "),imdb_id,icon.replace("'","%27"),fanart.replace("'","%27"),plot.replace("'"," "),rating,name_o.replace("'"," "),generes))
                            dbcon_tmdb.commit()
                        except:
                            # NOTE(review): aborts the whole plugin run when
                            # caching fails -- confirm this is intended.
                            all_data_inin=[]
                            all_data_inin.append((name.replace("'"," "),imdb_id,icon,fanart,plot.replace("'"," "),rating,name_o.replace("'"," "),generes))
                            sys.exit()
                else:
                    eng_name,imdb_id,icon,fanart,plot,rating,name,generes=items
                if imdb_id==None:
                    imdb_id=' '
                o_plot=plot
                if rating==None:
                    rating=' '
                if generes==None:
                    generes=' '
                elapsed_time = time.time() - start_time
                if Addon.getSetting("jen_progress")=='true':
                    dp.update(int(((z* 100.0)/(len(matches))) ), 'Slow the First Time. Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),name, str(len(all_links_in))+' , '+str(match))
                z=z+1
                # A '$$$'-joined link holds several mirrors for one title.
                links2=[]
                if '$$$' in link:
                    links2=link.split('$$$')
                else:
                    links2.append(link)
                for link in links2:
                    if '(' in link:
                        # "http...(label)" pairs: url plus display label.
                        regex='http(.+?)\((.+?)\)'
                        match44=re.compile(regex).findall(link)
                        for links,name2 in match44:
                            links='http'+links
                            if links not in all_links_in and 'trailer' not in name2.lower():
                                if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                    continue
                                all_links_in.append(links)
                                all_jen_lisy.append(( (rest.replace('=',' ')+'---'+fix_name(name2)).replace('..','.'), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
                    if 'LISTSOURCE' in link :
                        # Jen "LISTSOURCE:<url>::LISTNAME:<label>:" format.
                        regex='LISTSOURCE\:(.+?)\:\:LISTNAME\:(.+?)\:'
                        match2=re.compile(regex).findall(link)
                        if len(match2)>0:
                            for links,name2 in match2:
                                if '(' in links:
                                    regex='http(.+?)\('
                                    links=re.compile(regex).findall(links)[0]
                                if links not in all_links_in and 'trailer' not in name2.lower():
                                    if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                        continue
                                    all_links_in.append(links)
                                    all_jen_lisy.append(( (rest.replace('=',' ')+'---'+fix_name(name2)).replace('..','.'), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
                    elif 'sublink' in link:
                        # TV items: one <sublink> per episode, tagged with
                        # "]Season N Episode M[" in its attribute text.
                        regex_sub='<sublink(.+?)>(.+?)</sublink>'
                        match_sub=re.compile(regex_sub).findall(link)
                        if len(match_sub)>0:
                            for ep,links in match_sub:
                                regex_ep='\]Season (.+?) Episode (.+?)\['
                                match_ep=re.compile(regex_ep,re.IGNORECASE).findall(ep)
                                if len(match_ep)>0:
                                    season,episode=match_ep[0]
                                    plot='Season '+season+' Episode '+episode+'\n'+o_plot
                                else:
                                    season=' '
                                    episode=' '
                                if '(' in links:
                                    regex='http(.+?)\('
                                    links=re.compile(regex).findall(links)[0]
                                if links not in all_links_in:
                                    if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                        continue
                                    all_links_in.append(links)
                                    all_jen_lisy.append((ep.replace('=',' '), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,season,episode))
                    elif link not in all_links_in:
                        # Plain single-url entry.
                        if '(' in link:
                            regex='http(.+?)\('
                            link=re.compile(regex).findall(link)
                            if len(link)>0:
                                link=link[0]
                            else:
                                continue
                        if 'youtube' in link or '<link>' in link or link=='ignore' or 'ignor.me' in link or link=='https://' or link=='http://' or link==None or 'http' not in link:
                            continue
                        all_links_in.append(link)
                        all_jen_lisy.append((rest.replace('=',' '), link,icon,fanart,plot,year_n,info['title'],info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
    # Group every collected link under its (decoded) display name so one
    # directory row carries all mirrors, '$$$'-separated and labelled.
    all_names=[]
    all_links={}
    for name, link,icon,fanart,plot,data,saved_name,original_title,id,rating,generes,show_original_year,season,episode in all_jen_lisy:
        name1=saved_name.decode('utf8').strip()
        if name1 not in all_names:
            all_names.append(name1)
            all_links[name1]={}
            all_links[name1]['icon']=icon
            all_links[name1]['image']=fanart
            all_links[name1]['plot']=plot
            all_links[name1]['data']=data
            all_links[name1]['saved_name']=saved_name
            all_links[name1]['original_title']=original_title
            all_links[name1]['id']=id
            all_links[name1]['rating']=rating
            all_links[name1]['generes']=generes
            all_links[name1]['show_original_year']=show_original_year
            all_links[name1]['season']=season
            all_links[name1]['episode']=episode
            all_links[name1]['link']='[['+name+']]'+link
        else:
            if link not in all_links[name1]['link']:
                if '$$$' in link:
                    links=link.split('$$$')
                    for link in links:
                        all_links[name1]['link']=all_links[name1]['link']+'$$$'+'[['+name+']]'+link
                else:
                    all_links[name1]['link']=all_links[name1]['link']+'$$$'+'[['+name+']]'+link
    for items in all_links:
        icon=all_links[items]['icon']
        fanart=all_links[items]['image']
        plot=all_links[items]['plot']
        data=all_links[items]['data']
        saved_name=all_links[items]['saved_name']
        original_title=all_links[items]['original_title']
        id=all_links[items]['id']
        rating=all_links[items]['rating']
        generes=all_links[items]['generes']
        show_original_year=all_links[items]['show_original_year']
        season=all_links[items]['season']
        episode=all_links[items]['episode']
        link=all_links[items]['link']
        addLink( items.replace('.',' '), link,5,False,icon,fanart,plot,data=data,saved_name=saved_name,original_title=original_title,id=id,rating=rating,generes=generes,show_original_year=show_original_year,season=season,episode=episode)
    if Addon.getSetting("jen_progress")=='true':
        dp.close()
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
def build_jen_db():
    """Crawl every configured Jen list and cache its links in jen_db.db.

    Spawns one worker thread per list (settings "Pastebin-<j>-<i>" plus the
    bundled jen_lists.txt), waits for all workers, then inserts every link
    not already in the ``tmdb_data`` table, classifying each as 'RD'
    (host servable through real-debrid) or 'free'.  Shows the newly added
    names at the end.

    NOTE(review): worker bookkeeping looks off by one -- ``matches[str(m)]``
    is pre-seeded *before* ``m`` is incremented, while the thread (and the
    key ``get_data`` stores into) uses ``str(m)`` *after* the increment.
    Also ``count`` is used below without being initialised and ``o_plot``
    (sublink branch) is never assigned in this function, so those paths
    would raise NameError -- confirm before relying on them.
    """
    global matches
    import urlparse
    # Hosts that real-debrid can serve; everything else is typed 'free'.
    rd_domains=[u'4shared.com', u'rapidgator.net', u'sky.fm', u'1fichier.com', u'depositfiles.com', u'hitfile.net', u'filerio.com', u'solidfiles.com', u'mega.co.nz', u'scribd.com', u'flashx.tv', u'canalplus.fr', u'dailymotion.com', u'salefiles.com', u'youtube.com', u'faststore.org', u'turbobit.net', u'big4shared.com', u'filefactory.com', u'youporn.com', u'oboom.com', u'vimeo.com', u'redtube.com', u'zippyshare.com', u'file.al', u'clicknupload.me', u'soundcloud.com', u'gigapeta.com', u'datafilehost.com', u'datei.to', u'rutube.ru', u'load.to', u'sendspace.com', u'vidoza.net', u'tusfiles.net', u'unibytes.com', u'ulozto.net', u'hulkshare.com', u'dl.free.fr', u'streamcherry.com', u'mediafire.com', u'vk.com', u'uploaded.net', u'userscloud.com',u'nitroflare.com']
    tmdb_cacheFile = os.path.join(done_dir,'cache_f', 'jen_db.db')
    dbcon_tmdb = database.connect(tmdb_cacheFile)
    dbcur_tmdb = dbcon_tmdb.cursor()
    dbcur_tmdb.execute("CREATE TABLE IF NOT EXISTS %s ( ""name TEXT,""link TEXT,""year TEXT,""type TEXT);"% 'tmdb_data')
    try:
        dbcur_tmdb.execute("VACUUM 'AllData';")
        dbcur_tmdb.execute("PRAGMA auto_vacuum;")
        dbcur_tmdb.execute("PRAGMA JOURNAL_MODE=MEMORY ;")
        dbcur_tmdb.execute("PRAGMA temp_store=MEMORY ;")
    except:
        pass
    dbcon_tmdb.commit()
    dp = xbmcgui . DialogProgress ( )
    dp.create('Please Wait','Searching Sources', '','')
    dp.update(0, 'Please Wait','Searching Sources', '' )
    z=0
    start_time = time.time()
    all_links_in=[]
    headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
    'Accept': '*/*',
    'Accept-Language': 'en-US,en;q=0.5',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'X-Requested-With': 'XMLHttpRequest',
    'Connection': 'keep-alive',
    }
    stop_all=0
    matches={}
    thread=[]
    all_lists=[]
    all_jen_lisy=[]
    m=0
    # Queue a worker per configured list (categories 1..5, slots 1..39).
    for j in range(1,6):
        for i in range (1,40):
            if stop_all==1:
                break
            url2=Addon.getSetting(("Pastebin-%s-"%str(j))+str(i))
            if 'http' not in url2:
                continue
            all_lists.append(url2)
            matches[str(m)]=''
            m+=1
            # NOTE(review): Thread here is a project wrapper taking
            # (target, *args), not threading.Thread -- confirm.
            thread.append(Thread(get_data,str(m),url2,headers))
            thread[len(thread)-1].setName(str(m))
    f = open(os.path.join(tmdb_data_dir, 'jen_lists.txt'), 'r')
    file_data = f.readlines()
    f.close()
    for url2 in file_data:
        if 'http' not in url2:
            continue
        all_lists.append(url2)
        matches[str(m)]=''
        m+=1
        thread.append(Thread(get_data,str(m),url2,headers))
        thread[len(thread)-1].setName(str(m))
    for td in thread:
        td.start()
    # Busy-poll until every worker has finished, showing the last live one.
    while 1:
        num_live=0
        still_alive=0
        string_dp2=''
        for threads in thread:
            count=0
        for yy in range(0,len(thread)):
            if not thread[yy].is_alive():
                num_live=num_live+1
            else:
                still_alive=1
                string_dp2=thread[yy].name
        dp.update(0, 'Slow the First Time. Please Wait ',string_dp2, string_dp2)
        if still_alive==0:
            break
    season=' '
    # Links already cached: used below to insert only new rows.
    dbcur_tmdb.execute("SELECT * FROM tmdb_data")
    match4 = dbcur_tmdb.fetchall()
    all_eng_name=[]
    for eng_name,link,year,type in match4:
        all_eng_name.append(link)
    for match in matches:
        if stop_all==1:
            break
        for name_o,link,image in matches[match]:
            icon=image
            fanart=image
            plot=''
            generes=''
            image='http'+image.strip().replace('\n','').replace(' ','').replace('\r','').replace('\t','')
            if 1:
                name_o=fix_name(name_o).strip()
                name_o=name_o.replace('Real.Debrid.Only','')
                if name_o.endswith('.'):
                    name_o=name_o[:-1]
                info=(PTN.parse(name_o))
                info['title']=info['title'].replace('-',' ').replace(' ','.')
                name=info['title']
                if 'year' in info:
                    name_o=name_o.replace(str(info['year']),'').strip()
                    if name_o.endswith('.'):
                        name_o=name_o[:-1]
                rest=''
                for keys in info:
                    if keys!='title':
                        rest=rest+' '+str(info[keys])
                count=count+1
                imdb_id=' '
                rating=' '
                year_n='0'
                if 'year' in info:
                    year_n=info['year']
                if 'Season ' in link:
                    type='tv'
                else:
                    type='movie'
                if dp.iscanceled():
                    dp.close()
                    stop_all=1
                    break
                if imdb_id==None:
                    imdb_id=' '
                if rating==None:
                    rating=' '
                elapsed_time = time.time() - start_time
                dp.update(int(((z* 100.0)/(len(matches))) ), 'Slow the First Time. Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),name, str(len(all_links_in))+' , '+str(match))
                z=z+1
                # '$$$'-joined link = several mirrors for one title.
                links2=[]
                if '$$$' in link:
                    links2=link.split('$$$')
                else:
                    links2.append(link)
                for link in links2:
                    if '(' in link:
                        regex='http(.+?)\((.+?)\)'
                        match44=re.compile(regex).findall(link)
                        for links,name2 in match44:
                            links='http'+links
                            if links not in all_links_in and 'trailer' not in name2.lower():
                                if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                    continue
                                all_links_in.append(links)
                                all_jen_lisy.append(( (rest.replace('=',' ')+'---'+fix_name(name2)).replace('..','.'), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
                    if 'LISTSOURCE' in link :
                        regex='LISTSOURCE\:(.+?)\:\:LISTNAME\:(.+?)\:'
                        match2=re.compile(regex).findall(link)
                        if len(match2)>0:
                            for links,name2 in match2:
                                if '(' in links:
                                    regex='http(.+?)\('
                                    links=re.compile(regex).findall(links)[0]
                                if links not in all_links_in and 'trailer' not in name2.lower():
                                    if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                        continue
                                    all_links_in.append(links)
                                    all_jen_lisy.append(( (rest.replace('=',' ')+'---'+fix_name(name2)).replace('..','.'), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
                    elif 'sublink' in link:
                        regex_sub='<sublink(.+?)>(.+?)</sublink>'
                        match_sub=re.compile(regex_sub).findall(link)
                        if len(match_sub)>0:
                            for ep,links in match_sub:
                                regex_ep='\]Season (.+?) Episode (.+?)\['
                                match_ep=re.compile(regex_ep,re.IGNORECASE).findall(ep)
                                if len(match_ep)>0:
                                    season,episode=match_ep[0]
                                    # NOTE(review): o_plot is never set in this
                                    # function -- this line would NameError.
                                    plot='Season '+season+' Episode '+episode+'\n'+o_plot
                                else:
                                    season=' '
                                    episode=' '
                                if '(' in links:
                                    regex='http(.+?)\('
                                    links=re.compile(regex).findall(links)[0]
                                if links not in all_links_in:
                                    if 'youtube' in links or '<links>' in links or links=='ignore' or 'ignor.me' in links or links=='https://' or links=='http://' or links==None or 'http' not in links:
                                        continue
                                    all_links_in.append(links)
                                    all_jen_lisy.append((ep.replace('=',' '), links,icon,fanart,plot,year_n,name_o,info['title'],imdb_id,rating,generes,year_n,season,episode))
                    elif link not in all_links_in:
                        if '(' in link:
                            regex='http(.+?)\('
                            link=re.compile(regex).findall(link)
                            if len(link)>0:
                                link=link[0]
                            else:
                                continue
                        if 'youtube' in link or '<link>' in link or link=='ignore' or 'ignor.me' in link or link=='https://' or link=='http://' or link==None or 'http' not in link:
                            continue
                        all_links_in.append(link)
                        all_jen_lisy.append((rest.replace('=',' '), link,icon,fanart,plot,year_n,info['title'],info['title'],imdb_id,rating,generes,year_n,'%20','%20'))
    all_names=[]
    all_links={}
    z=0
    n=0
    all_new=[]
    # Insert every link that is not in the cache yet.
    for name, link,icon,fanart,plot,data,saved_name,original_title,id,rating,generes,show_original_year,season,episode in all_jen_lisy:
        name1=saved_name.decode('utf8').strip()
        dp.update(int(((z* 100.0)/(len(all_jen_lisy))) ), 'Updating DB',name1.replace('.',' ').replace("'","%27"),str( z)+','+'New:'+str(n))
        if link not in all_eng_name:
            host = link.replace("\\", "")
            host2 = host.strip('"')
            host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)
            if len(host)==0:
                continue
            host=host[0]
            if host not in rd_domains:
                type='free'
            else:
                type='RD'
            n+=1
            all_new.append(name1)
            # NOTE(review): "'"->"27" in the link replace looks like a typo
            # for "%27"; also values are %-spliced into the SQL instead of
            # using parameterized queries -- confirm/clean up.
            dbcur_tmdb.execute("INSERT INTO tmdb_data Values ('%s', '%s', '%s', '%s');" % (name1.replace('.',' ').replace("'","%27").lower(),link.replace("'","27"),str(show_original_year).replace("'","%27"),type))
        z+=1
    showText('New', '\n'.join(all_new))
    dbcon_tmdb.commit()
    dp.close()
def save_fav(id, tv_movie):
    """Append *id* to the persistent favourites list for movies or TV.

    The list lives in the user-data folder ("fav_tv.txt" for tv_movie=='tv',
    otherwise "fav_movie.txt"), one id per line.  Before appending, the file
    is cleaned: blank/too-short lines are dropped and, once the list grows
    past 150 entries, only the ~100 most recent are kept.  Duplicate ids are
    not re-added.  The file is rewritten only when something changed.

    Fixes over the previous version: the old cleanup loops ran
    ``range(len-1, 0, -1)`` and therefore never touched index 0, which could
    leave a stale trailing '\\n' on the first entry and let a duplicate of
    it slip back in; files are now also handled via ``with``.
    """
    if tv_movie == 'tv':
        save_file = os.path.join(user_dataDir, "fav_tv.txt")
    else:
        save_file = os.path.join(user_dataDir, "fav_movie.txt")
    raw = []
    if os.path.exists(save_file):
        with open(save_file, 'r') as fh:
            raw = fh.readlines()
    # Strip newlines from EVERY line (including index 0) so membership
    # tests compare clean ids.
    entries = [line.replace('\n', '') for line in raw]
    # Drop blank / garbage lines.
    entries = [e for e in entries if len(e) >= 3]
    # Cap growth: once past 150 entries, keep only the most recent 100.
    if len(entries) > 150:
        entries = entries[-100:]
    changed = len(entries) != len(raw)
    if id not in entries:
        entries.append(id)
        changed = True
    if changed:
        with open(save_file, 'w') as fh:
            fh.write('\n'.join(entries))
def open_fav(url):
    """Render the favourites stored in fav.txt as a Kodi directory.

    Each line of the file is a '$$'-separated list of integer character
    codes; joined back into characters it yields a plugin query string.
    The decoded parameters are unpacked (each optional, hence the
    try/except blocks) and re-emitted as an ActivateWindow(10025,...)
    link so the favourite opens in a fresh plugin window.

    *url* selects the view: 'movies' keeps only mode-4 entries, 'tv' only
    mode-7 entries, anything else shows all.
    """
    save_file=os.path.join(user_dataDir,"fav.txt")
    if url=='movies':
        type='movies'
    elif url=='tv':
        type='tv'
    else:
        type='all'
    url=None
    name=None
    mode=None
    iconimage=None
    fanart=None
    description=None
    original_title=None
    file_data=[]
    change=0
    if os.path.exists(save_file):
        f = open(save_file, 'r')
        file_data = f.readlines()
        f.close()
    num=0
    for items in file_data:
        if len(items)>1:
            # Decode "c1$$c2$$..." (ints) back into the query string.
            list1=items.split("$$")
            full_str=''
            for item_as in list1:
                full_str=full_str+chr(int(item_as))
            params=get_custom_params(full_str)
            # Reset all fields to safe defaults before unpacking this entry.
            url=None
            name=None
            mode=None
            iconimage=None
            fanart=None
            description=None
            original_title=None
            data=0
            id=' '
            season=0
            episode=0
            show_original_year=0
            heb_name=' '
            tmdbid=' '
            eng_name=' '
            try:
                url=urllib.unquote_plus(params["url"])
            except:
                pass
            try:
                name=urllib.unquote_plus(params["name"])
            except:
                pass
            try:
                iconimage=urllib.unquote_plus(params["iconimage"])
            except:
                pass
            try:
                mode=int(params["mode"])
            except:
                pass
            try:
                fanart=urllib.unquote_plus(params["fanart"])
            except:
                pass
            try:
                description=urllib.unquote_plus(params["description"])
            except:
                pass
            try:
                data=urllib.unquote_plus(params["data"])
            except:
                pass
            try:
                original_title=(params["original_title"])
            except:
                pass
            try:
                id=(params["id"])
            except:
                pass
            try:
                season=(params["season"])
            except:
                pass
            try:
                episode=(params["episode"])
            except:
                pass
            try:
                tmdbid=(params["tmdbid"])
            except:
                pass
            try:
                eng_name=(params["eng_name"])
            except:
                pass
            try:
                show_original_year=(params["show_original_year"])
            except:
                pass
            try:
                heb_name=(params["heb_name"])
            except:
                pass
            # Rebuild the plugin url; note the original mode is passed as
            # "mode2" so the target window re-dispatches it itself.
            te1=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode2="+str(mode)
            te2="&name="+(name)+"&iconimage="+urllib.quote_plus(iconimage)+"&fanart="+urllib.quote_plus(fanart)+"&description="+urllib.quote_plus(description)+"&heb_name="+urllib.quote_plus(heb_name)
            te3="&data="+str(data)+"&original_title="+urllib.quote_plus(original_title)+"&id="+(id)+"&season="+str(season)
            te4="&episode="+str(episode)+"&tmdbid="+str(tmdbid)+"&eng_name="+(eng_name)+"&show_original_year="+(show_original_year)
            u=te1 + te2 + te3 + te4.decode('utf8')
            link="ActivateWindow(10025,%s,return)" % (u)
            if (type=='movies' and mode==4) or type=='all' or (type=='tv' and mode==7):
                addLink( name, link,99,True, iconimage,fanart,description,data=data,original_title=original_title,id=id,season=season,episode=episode,num_in_list=num)
                # NOTE(review): num only advances for rows that pass the
                # filter, so num_in_list matches the file's line index only
                # in the unfiltered ('all') view -- confirm against
                # remove_fav_num, which pops by raw line index.
                num=num+1
def remove_to_fav(plot):
    """Remove the favourites entry equal to *plot* from ``save_file``.

    Reads the module-level ``save_file`` path, drops the first matching
    line (if any) and rewrites the file.  Always pops an on-screen
    "Removed" notification, even when nothing matched (preserved from the
    original behaviour).

    Fixes over the previous version: lines are newline-stripped before the
    rewrite -- the old code joined raw ``readlines()`` output (which still
    carried '\\n') with '\\n', doubling every line break on each rewrite,
    and it also failed to match a final line that lacked a trailing '\\n'.
    """
    entries = []
    if os.path.exists(save_file):
        with open(save_file, 'r') as fh:
            entries = [line.rstrip('\n') for line in fh.readlines()]
    if plot in entries:
        # Only the first occurrence is removed, as before.
        entries.remove(plot)
        with open(save_file, 'w') as fh:
            fh.write('\n'.join(entries))
    xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Removed')).encode('utf-8'))
def remove_fav_num(plot):
    """Remove the favourites entry at line index *plot* (a stringified int).

    Rewrites the module-level ``save_file`` without that line, shows a
    "Removed" notification and refreshes the Kodi container.

    Fixes over the previous version: the old guard
    ``len(file_data) >= int(plot)`` allowed ``pop(len(file_data))`` and
    raised IndexError when the index equalled the list length; it also
    joined raw ``readlines()`` output (still carrying '\\n') with '\\n',
    doubling every line break on each rewrite.
    """
    index = int(plot)
    entries = []
    if os.path.exists(save_file):
        with open(save_file, 'r') as fh:
            entries = [line.rstrip('\n') for line in fh.readlines()]
    # pop() requires 0 <= index < len(entries).
    if 0 <= index < len(entries):
        entries.pop(index)
        with open(save_file, 'w') as fh:
            fh.write('\n'.join(entries))
    xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Removed')).encode('utf-8'))
    xbmc.executebuiltin('Container.Refresh')
def play_by_subs(name,urls,iconimage,fanart,description_o,data,original_title,season,episode,id,eng_name,saved_name,original_title1,one_list=False):
    """List torrent sources for one title, scored against its subtitles.

    *urls* is a JSON dict of magnets (or the literal 'torrents' to load the
    cached blob from the ``torrents`` table); *original_title* carries a
    url-quoted JSON payload with subtitle data used by check_pre() to score
    each release name.  Entries are filtered by size/seed settings, sorted
    per the "order_torrents_new" setting and rendered either through the
    new source window or as plain directory links.
    """
    from urllib import quote_plus
    if urls=='torrents':
        dbcur.execute("SELECT * FROM torrents")
        urls = dbcur.fetchone()[0].decode('base64')
    dp = xbmcgui.DialogProgress()
    dp.create("Updating", "Please Wait", '')
    dp.update(0)
    all_magents=json.loads(urls)
    plot=description_o
    tmdbKey = '1248868d7003f60f2386595db98455ef'
    # A real season value marks the item as TV; '%20' is the "no season"
    # placeholder used throughout this module.
    if season!=None and season!="%20":
        tv_movie='tv'
        url2='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
    else:
        tv_movie='movie'
        url2='http://api.themoviedb.org/3/movie/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
    # Resolve a TMDB id to an IMDB id when needed.
    if 'tt' not in id:
        try:
            imdb_id=requests.get(url2).json()['external_ids']['imdb_id']
        except:
            imdb_id=" "
    else:
        imdb_id=id
    all_subs_in=json.loads(urllib.unquote_plus(original_title))
    all_data=[]
    xxx=0
    for mag in all_magents:
        # The 'server' field is a formatted label: "- <server> - <size> GB
        # ... {P-<peers>/S-<seeds>}"; parse its pieces out.
        regex='- (.+?) -'
        server_p=re.compile(regex).findall(all_magents[mag]['server'])
        if len(server_p)>0:
            server=server_p[0]
        else:
            server=''
        dp.update(int(((xxx* 100.0)/(len(all_magents))) ), all_magents[mag]['name'],'')
        xxx+=1
        title=all_magents[mag]['name']
        # Score this release name against the available subtitles.
        pre=check_pre(title.replace(' ','.').replace('(','').replace(')',''),all_subs_in['links'],original_title)
        description=plot
        if 1:#try:
            info=(PTN.parse(title))
            if 'resolution' in info:
                res=info['resolution']
            else:
                if "HD" in title:
                    res="HD"
                elif "720" in title:
                    res="720"
                elif "1080" in title:
                    res="1080"
                else:
                    res=' '
        #except:
        #    res=' '
        #    pass
        fixed_q=fix_q(res)
        try:
            regex=' - (.+?) GB'
            size=re.compile(regex).findall(all_magents[mag]['server'])[0]
            if 'MB' in size:
                # NOTE(review): size is a string here, so size/1000 raises
                # TypeError and the except resets size to 0 -- MB-sized
                # entries therefore always pass the size filter. Confirm.
                size=size/1000
        except:
            size=0
        max_size=int(Addon.getSetting("size_limit"))
        if float(size)<max_size:
            regex='{P-(.+?)/S-(.+?)}'
            seeds=re.compile(regex).findall(all_magents[mag]['server'])[0][1].replace(' ','')
            peers=re.compile(regex).findall(all_magents[mag]['server'])[0][0].replace(' ','')
            seeds=seeds.replace(',','')
            peers=peers.replace(',','')
            regex='-(.+?)GB'
            if int(seeds)>=int(Addon.getSetting("min_seed")):
                all_data.append(('[COLOR gold]'+str(pre)+'%'+ '[/COLOR]- P%s/S%s- [COLOR lightgreen]%sGB[/COLOR]-[COLOR khaki]'%(peers,seeds,size)+res+ '[/COLOR]',all_magents[mag]['link'],urllib.quote_plus(str(all_magents[mag]['name'])),pre,res,fixed_q,all_magents[mag]['plot'],title,int(seeds),size,server))
    # Sort per user preference: 0=quality rank, 1=subtitle score,
    # 2=quality buckets then score, 3=seeds, else size.
    if Addon.getSetting("order_torrents_new")=='0':
        all_data=sorted(all_data, key=lambda x: x[5], reverse=False)
    elif Addon.getSetting("order_torrents_new")=='1':
        all_data=sorted(all_data, key=lambda x: x[3], reverse=True)
    elif Addon.getSetting("order_torrents_new")=='2':
        all_2160=[]
        all_1080=[]
        all_720=[]
        all_480=[]
        all_else=[]
        for name,link,origi,pre,res,fixed_q,description,title,seed,size,server in all_data:
            if fixed_q==1:
                all_2160.append((name,link,origi,pre,res,fixed_q,description,title,seed,size,server))
            elif fixed_q==2:
                all_1080.append((name,link,origi,pre,res,fixed_q,description,title,seed,size,server))
            elif fixed_q==3:
                all_720.append((name,link,origi,pre,res,fixed_q,description,title,seed,size,server))
            elif fixed_q==4:
                all_480.append((name,link,origi,pre,res,fixed_q,description,title,seed,size,server))
            else :
                all_else.append((name,link,origi,pre,res,fixed_q,description,title,seed,size,server))
        all_2160=sorted(all_2160, key=lambda x: x[3], reverse=True)
        all_1080=sorted(all_1080, key=lambda x: x[3], reverse=True)
        all_720=sorted(all_720, key=lambda x: x[3], reverse=True)
        all_480=sorted(all_480, key=lambda x: x[3], reverse=True)
        all_else=sorted(all_else, key=lambda x: x[3], reverse=True)
        all_data=all_2160+all_1080+all_720+all_480+all_else
    elif Addon.getSetting("order_torrents_new")=='3':
        all_data=sorted(all_data, key=lambda x: x[8], reverse=True)
    else:
        all_data=sorted(all_data, key=lambda x: x[9], reverse=True)
    m=[]
    for name,link,origi,pre,res,fixed_q,description,title,seed,size,server in all_data:
        dp.update(int(((xxx* 100.0)/(len(all_magents))) ),name,'Ordering')
        # Build the video_data info-labels dict for the player.
        video_data={}
        fixed_name=title
        if season!=None and season!="%20":
            video_data['TVshowtitle']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
            video_data['mediatype']='tvshow'
        else:
            video_data['mediatype']='movies'
        video_data['OriginalTitle']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
        video_data['title']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
        video_data['poster']=fanart
        video_data['fanart']=fanart
        video_data['plot']=description+'\n_from_Destiny_'
        video_data['icon']=iconimage
        video_data['year']=data
        video_data['season']=season
        video_data['episode']=episode
        video_data['imdb']=imdb_id
        video_data['code']=imdb_id
        video_data['imdbnumber']=imdb_id
        video_data['imdb_id']=imdb_id
        video_data['IMDBNumber']=imdb_id
        video_data['genre']=imdb_id
        if ((Addon.getSetting("new_source_menu")=='true') or new_windows_only) :
            regex='- P(.+?)/S(.+?)'
            m1=re.compile(regex).findall(name)
            if len(m1)>0:
                added='{P'+m1[0][0]+'/S'+m1[0][1]
            else:
                added=''
            m.append((title.replace('[','.').replace(']','.')+'['+server+']'+'-'+size+('GB -{%s}'%added),link,iconimage,fanart,description+'\n_from_Destiny_','',res,'',res,title,pre))
        else:
            addLink(name, link,5,False, iconimage,fanart,'[COLOR aqua]'+res+'[/COLOR]\n'+description,original_title=original_title,id=id,data=data,saved_name=title,video_info=json.dumps(video_data))
    # NOTE(review): fixed_name/description below come from the last loop
    # iteration and would NameError if all_data is empty -- confirm.
    if Addon.getSetting("new_source_menu")=='true':
        new_show_sources(m,data,description+'\n_from_Destiny_',fixed_name,episode,fanart,fixed_name,iconimage,id,fixed_name,original_title1,season,data,[],[],[],[])
    dp.close()
def activate_torrent(sub,urls,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,saved_name):
    """Start torrent playback for the entry picked from a torrent list.

    *sub* is the display label ("<score>% <title>") whose title part seeds
    the search payload; *urls* is the magnet/torrent link.  A numeric
    *season* marks the item as TV and currently falls through without
    calling play() -- presumably handled elsewhere; confirm.
    """
    from play import play
    # SECURITY NOTE(review): eval() on data that travelled through a URL
    # parameter executes arbitrary code -- this should be json.loads or
    # ast.literal_eval on a trusted shape.
    items=eval(urllib.unquote_plus(original_title))
    title=sub.split("% ")[1]
    try:
        s=int (season)
        tv_mode='tv'
    except:
        tv_mode='movie'
        pass
    if tv_mode=='movie':
        payload = '?search=movie&imdb_id=%s&title=%s&year=%s' % (id, title, data)
        play(urls, payload, items)
def server_test():
    """List every scraper module found in the source folders.

    Scans the regular, magnet and real-debrid scraper directories, imports
    each candidate module, asks get_type() what flavour it is, and adds a
    directory row "name(flavour)" (mode 23) so individual scrapers can be
    exercised from the UI.
    """
    #addDir3('Scan Direct links', 'www',33, ' ',' ',' ')
    excluded = ('general.py', 'resolveurl.py', 'cache.py', 'cloudflare.py', 'Addon.py')
    candidates = []
    for folder in (done_dir, mag_dir, rd_dir):
        candidates.extend(f for f in listdir(folder) if isfile(join(folder, f)))
    # Original ordering preserved: sort by the first character only.
    candidates.sort(key=lambda fname: fname[0])
    for fname in candidates:
        if fname in excluded:
            continue
        if '.pyc' in fname or '.pyo' in fname or '__init__' in fname:
            continue
        module_name = fname.replace('.py', '')
        scraper_module = __import__(module_name)
        scraper_type, scraper_flavour = get_type(scraper_module, module_name)
        addDir3(module_name + '(' + scraper_flavour + ')', fname, 23, ' ', ' ', ' ')
def showText(heading, text):
    """Open Kodi's text-viewer window (10147) and fill it with *text*.

    The window's controls are not available immediately after
    ActivateWindow, so the label/text assignment is retried briefly
    (up to 50 attempts, 10 ms apart) until the controls respond.
    """
    window_id = 10147
    xbmc.executebuiltin('ActivateWindow(%d)' % window_id)
    xbmc.sleep(100)
    win = xbmcgui.Window(window_id)
    for _attempt in range(50):
        try:
            xbmc.sleep(10)
            win.getControl(1).setLabel(heading)
            win.getControl(5).setText(text)
            return
        except:
            # Controls not ready yet -- keep retrying.
            pass
def get_links_new(hostDict,imdb_id,name1,type,items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id,premiered,test=False):
    """Run one scraper module and collect its links into all_links_sources.

    *items* is an imported scraper module whose flavour (exodus / gaia /
    seren / universal) is probed the same way get_type() does.  Results are
    appended to the global ``all_links_sources[name1]['links']`` as
    ``(display_name, url, server_or_info, quality)`` tuples.  With the
    "check_l" setting on (or *test* True) every non-magnet link is resolved
    and validated first; under *test*, dead links are kept but tagged
    "[COLOR red]BAD".  Returns the updated dict on success; on any
    exception the failing line is logged and None is returned implicitly.
    """
    global stop_all
    logging.warning('test:'+str(test))
    if allow_debrid:
        import real_debrid
        # Cached list of hosts real-debrid can resolve (refreshed 72h).
        rd_domains=cache.get(get_rd_servers, 72, table='pages')
    all_links_sources[name1]={}
    all_links_sources[name1]['links']=[]
    all_links_sources[name1]['torrent']=False
    if 'rd' in type:
        all_links_sources[name1]['rd']=True
    else:
        all_links_sources[name1]['rd']=False
    try:
        from general import server_data
        aliases=[]
        aliases.append({'country': 'us', 'title': original_title})
        # Premium hosts: skipped during link validation (debrid-only).
        hostprDict = ['1fichier.com', 'oboom.com', 'rapidgator.net', 'rg.to', 'uploaded.net',
        'uploaded.to', 'ul.to', 'filefactory.com', 'nitroflare.com', 'turbobit.net', 'uploadrocket.net','uploadgig.com']
        s_type=''
        # Flavour probe: exodus exposes source(), seren sources(),
        # otherwise fall back to a class named after the module.
        try:
            base=items.source()
        except:
            try:
                base=items.sources()
            except:
                classes={}
                import inspect
                for name2, obj in inspect.getmembers(items):
                    if inspect.isclass(obj):
                        classes[name2] = obj
                base=classes[name1]
                s_type='universal'
        if s_type=='universal':
            if tv_movie=='movie':
                sour=base().scrape_movie( original_title, show_original_year,imdb_id, debrid = allow_debrid)
                for it in sour:
                    host_pre=re.compile('//(.+?)/').findall(it['url'])
                    if len(host_pre)>0:
                        host=host_pre[0].replace('www','')
                    else:
                        host='www'
                    pre_q=it['quality']
                    it['quality']=res_q(it['quality'])
                    if stop_all==1:
                        break
                    if 'magnet:' in it['url']:
                        all_links_sources[name1]['torrent']=True
                    if (Addon.getSetting("check_l")=='true' or test) and 'magnet:' not in it['url']:
                        # Validate the link: resolve, then either run it
                        # through real-debrid or probe the server directly.
                        try:
                            t_url=base.resolve(it['url'])
                        except:
                            t_url=it['url']
                        if allow_debrid:
                            try:
                                host = t_url.split('//')[1].replace('www.','')
                                host = host.split('/')[0].lower()
                            except:
                                host='no'
                            if host in rd_domains:
                                rd = real_debrid.RealDebrid()
                                url=t_url
                                link=rd.get_link(url)
                                if 'error' not in link:
                                    if 'filename' in link:
                                        name2=link['filename']
                                    else:
                                        name2=original_title
                                    if 'host' in link:
                                        match_s=link['host']
                                    else:
                                        regex='//(.+?)/'
                                        match_s=host
                                    all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                else:
                                    name2,match_s,res,check=server_data(t_url,original_title)
                                    if check:
                                        all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                    elif test:
                                        logging.warning('Test: BAD '+str(test))
                                        all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                            elif host not in hostprDict:
                                name2,match_s,res,check=server_data(t_url,original_title)
                                if check:
                                    all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                elif test:
                                    logging.warning('Test: BAD '+str(test))
                                    all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                    else:
                        added=it['source']
                        if it['source'].lower()=='magnet' or it['source'].lower()=='torrent':
                            try:
                                added='-magnet- '+pre_q.split('|')[1]
                            except:
                                added=it['source']
                        all_links_sources[name1]['links'].append((original_title,it['url'],added,it['quality']))
            else:
                sour=base().scrape_episode( original_title, show_original_year,show_original_year,season, episode,imdb_id,'', debrid = allow_debrid)
                for it in sour:
                    host_pre=re.compile('//(.+?)/').findall(it['url'])
                    if len(host_pre)>0:
                        host=host_pre[0].replace('www','')
                    else:
                        host='www'
                    it['quality']=res_q(it['quality'])
                    if 'magnet:' in it['url']:
                        all_links_sources[name1]['torrent']=True
                    if stop_all==1:
                        break
                    if (Addon.getSetting("check_l")=='true' or test) and 'magnet:' not in it['url']:
                        try:
                            t_url=base.resolve(it['url'])
                        except:
                            t_url=it['url']
                        if allow_debrid:
                            try:
                                host = t_url.split('//')[1].replace('www.','')
                                host = host.split('/')[0].lower()
                            except:
                                host='no'
                            if host in rd_domains:
                                rd = real_debrid.RealDebrid()
                                url=t_url
                                link=rd.check_link(url)
                                if 'error' not in link:
                                    if 'filename' in link:
                                        name2=link['filename']
                                    else:
                                        name2=original_title
                                    if 'host' in link:
                                        match_s=link['host']
                                    else:
                                        regex='//(.+?)/'
                                        match_s=host
                                    all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                else:
                                    name2,match_s,res,check=server_data(t_url,original_title)
                                    if check:
                                        all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                    elif test:
                                        logging.warning('Test: BAD '+str(test))
                                        all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                            elif host not in hostprDict:
                                name2,match_s,res,check=server_data(t_url,original_title)
                                if check:
                                    all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                elif test:
                                    logging.warning('Test: BAD '+str(test))
                                    all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                    else:
                        all_links_sources[name1]['links'].append((original_title,it['url'],it['source'],it['quality']))
        else:
            # Exodus-style flow: movie()/tvshow()+episode() then sources().
            # If sources() itself fails, the intermediate value is assumed
            # to already be a seren-style result list.
            if tv_movie=='movie':
                try:
                    m_string=base.movie(imdb_id, original_title, original_title,aliases, show_original_year)
                except:
                    try:
                        m_string=base.movie(imdb_id, original_title, original_title, show_original_year)
                    except:
                        m_string=base.movie(original_title, show_original_year)
                try:
                    sources_string=base.sources(m_string, hostDict, hostprDict)
                except Exception as e:
                    logging.warning(e)
                    sources_string=m_string
                    s_type='seren'
            else:
                try:
                    try:
                        m_string_pre=base.tvshow(imdb_id, '',original_title, original_title, aliases, show_original_year)
                    except:
                        m_string_pre=base.tvshow(imdb_id, '',original_title, original_title, show_original_year)#Gaia
                    m_string=base.episode(m_string_pre, imdb_id,'',original_title, premiered,season, episode)
                    sources_string=base.sources(m_string,hostDict, hostprDict)
                except Exception as e:
                    logging.warning(e)
                    # Seren-style episode signature: build the info dict.
                    m_string_pre={}
                    m_string_pre['show_title']=original_title
                    m_string_pre['season_number']=season
                    m_string_pre['episode_number']=episode
                    m_string_pre['show_aliases']=''
                    m_string_pre['year']=show_original_year
                    m_string_pre['country']=''
                    m_string=base.episode(m_string_pre, [])
                    sources_string=m_string
                    s_type='seren'
            if s_type=='seren':
                for it in sources_string:
                    if stop_all==1:
                        break
                    # Seren entries carry the raw release title; derive the
                    # quality from it.
                    if '2160' in it['release_title'] or '4k' in it['release_title']:
                        q='2160'
                    elif '1080' in it['release_title']:
                        q='1080'
                    elif '720' in it['release_title']:
                        q='720'
                    elif '480' in it['release_title']:
                        q='480'
                    elif '360' in it['release_title']:
                        q='360'
                    else:
                        q='unk'
                    if 'seeds' in it:
                        if it['seeds']==None:
                            seeds='0'
                        else:
                            seeds=it['seeds']
                        if 'size' not in it:
                            it['size']='0'
                        try:
                            added=' -magnet- '+str(float("{0:.2f}".format(float( it['size'])/(1024))) )+' GB'+' {P-%s/S-%s}'%('0',str(it['seeds']))
                        except Exception as e:
                            logging.warning('Error in Float:'+str(e))
                            logging.warning('mSize:'+it['metadata'].mSize)
                            added=''
                        all_links_sources[name1]['torrent']=True
                    else:
                        added='-magnet-'
                        all_links_sources[name1]['torrent']=False
                    q=res_q(q)
                    all_links_sources[name1]['links'].append((it['release_title'],it['magnet'],added,q))
            else:
                for it in sources_string:
                    host_pre=re.compile('//(.+?)/').findall(it['url'])
                    if len(host_pre)>0:
                        host=host_pre[0].replace('www','')
                    else:
                        host='www'
                    it['quality']=res_q(it['quality'])
                    if stop_all==1:
                        break
                    if 'info' in it and 'magnet' in it['url']:
                        try:
                            added='-magnet- '+it['info']
                        except:
                            added=''
                    elif 'metadata' in it:
                        # Gaia-style metadata object: size in bytes, seeds.
                        if it['metadata'].mSeeds==None:
                            seeds='0'
                        else:
                            seeds=it['metadata'].mSeeds
                        try:
                            added='-magnet- '+str( float("{0:.2f}".format(float(it['metadata'].mSize)/(1024*1024*1024))) )+' GB'+' {P-%s/S-%s}'%('0',str(seeds))
                        except Exception as e:
                            logging.warning('Error in Float:'+str(e))
                            logging.warning(it['metadata'].mSize)
                            added=''
                        all_links_sources[name1]['torrent']=True
                    else:
                        added=''
                        all_links_sources[name1]['torrent']=False
                    if 'magnet:' in it['url']:
                        all_links_sources[name1]['torrent']=True
                    if (Addon.getSetting("check_l")=='true' or test) and all_links_sources[name1]['torrent']==False and 'magnet:' not in it['url']:
                        try:
                            t_url=base.resolve(it['url'])
                        except:
                            t_url=it['url']
                        if allow_debrid:
                            try:
                                host = t_url.split('//')[1].replace('www.','')
                                host = host.split('/')[0].lower()
                            except:
                                host='no'
                            if host in rd_domains:
                                rd = real_debrid.RealDebrid()
                                url=t_url
                                link=rd.check_link(url)
                                if 'error' not in link:
                                    if 'filename' in link:
                                        name2=link['filename']
                                    else:
                                        name2=original_title
                                    if 'host' in link:
                                        match_s=link['host']
                                    else:
                                        regex='//(.+?)/'
                                        match_s=host
                                    all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                else:
                                    name2,match_s,res,check=server_data(t_url,original_title)
                                    if check:
                                        all_links_sources[name1]['links'].append((name2,it['url'],match_s,it['quality']))
                                    elif test:
                                        logging.warning('Test: BAD '+str(test))
                                        all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                            elif host not in hostprDict:
                                name2,match_s,res,check=server_data(t_url,original_title)
                                if 'pageURL' in it['url']:
                                    it['url']=json.dumps(it['url'])
                                if check:
                                    all_links_sources[name1]['links'].append((original_title,it['url'],it['source']+' '+added,it['quality']))
                                elif test:
                                    logging.warning('Test: BAD '+str(test))
                                    all_links_sources[name1]['links'].append(('[COLOR red]BAD '+name2+'[/COLOR]',it['url'],match_s,it['quality']))
                    else:
                        if 'file' in it:
                            name2=it['file']
                        else:
                            name2=original_title
                        all_links_sources[name1]['links'].append((name2,it['url'],it['source']+' '+added,it['quality']))
        all_links_sources[name1]['color']='white'
        return all_links_sources
    except Exception as e:
        # Log the exact failing line so broken scrapers are identifiable.
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        logging.warning('ERROR IN sources:'+str(lineno))
        logging.warning('inline:'+line)
        logging.warning(e)
        logging.warning('BAD source')
def get_type(items,name):
    """Probe an imported scraper module and report its capabilities.

    items -- the imported scraper module object
    name  -- the module's name (used to locate its class in the generic case)

    Returns a tuple (type, source_scraper):
      type           -- list containing 'tv' and/or 'movie'
      source_scraper -- one of 'exodus', 'gaia', 'seren', 'universal',
                        detected by which entry point the module exposes.
    """
    type=[]
    source_scraper=''
    try:
        # Exodus-style scrapers expose a source() factory.
        base=items.source()
        source_scraper='exodus'
        try:
            # Gaia scrapers additionally carry a .pack attribute on source().
            p=items.source().pack
            source_scraper='gaia'
        except Exception as e:
            logging.warning(e)
            pass
    except:
        try:
            # Seren-style scrapers expose sources() instead of source().
            base=items.sources()
            source_scraper='seren'
        except:
            # Generic fallback: scan the module for classes and pick the one
            # named like the module itself (raises KeyError if absent).
            classes={}
            import inspect
            for name1, obj in inspect.getmembers(items):
                if inspect.isclass(obj):
                    classes[name1] = obj
            source_scraper='universal'
            base=classes[name]
    # Detect supported media types by attribute presence: episode/movie for
    # exodus/gaia/seren style, scrape_episode/scrape_movie for the rest.
    try:
        a= base.episode
        type.append('tv')
        try:
            a= base.movie
            type.append('movie')
        except Exception as e:
            logging.warning(e)
            pass
    except Exception as e:
        logging.warning(e)
        try:
            a= base.movie
            type.append('movie')
        except Exception as e:
            # Neither episode nor movie: try the scrape_* naming scheme.
            a= base.scrape_episode
            type.append('tv')
            try:
                a= base.scrape_movie
                type.append('movie')
            except Exception as e:
                logging.warning(e)
                pass
        logging.warning(e)
        pass
    return type,source_scraper
def run_test(name_o):
    """Run a single scraper module against a fixed test title and list the
    links it finds.

    name_o -- scraper name as shown in the UI; anything after '(' is dropped.

    Spawns one worker thread (get_links_new) for the matching scraper file,
    shows a progress dialog with per-resolution link counts, and finally adds
    each found link to the directory via addLink.
    Side effects: mutates globals all_links_sources and stop_all.
    """
    global all_links_sources,stop_all
    dp = xbmcgui.DialogProgress()
    dp.create("Checking", "Please Wait", '')
    dp.update(0)
    name_o=name_o.split('(')[0]
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # Collect candidate scraper files from all three scraper directories.
    mypath=done_dir
    onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
    onlyfiles =onlyfiles+ [f for f in listdir(mag_dir) if isfile(join(mag_dir, f))]
    onlyfiles =onlyfiles+ [f for f in listdir(rd_dir) if isfile(join(rd_dir, f))]
    f_result={}
    all_sources=[]
    name_check=name_o
    # Import only the scraper whose module name matches name_check, skipping
    # helper/infrastructure files.
    for items in onlyfiles:
        if items !='general.py' and '.pyc' not in items and '.pyo' not in items and '__init__' not in items and items !='resolveurl.py' and items !='Addon.py' and items !='cache.py' and items!='cloudflare.py':
            impmodule = __import__(items.replace('.py',''))
            if name_check!='' :
                if items.replace('.py','')==name_check:
                    all_sources.append((items.replace('.py',''),impmodule))
    thread=[]
    type=[]
    string_dp=''
    for name1,items in all_sources:
        type,source_scraper=get_type(items,name1)
        try:
            # Build a de-duplicated, lower-cased list of resolvable host
            # domains from resolveurl (best effort; empty on failure).
            import resolveurl
            hostDict = resolveurl.relevant_resolvers(order_matters=True)
            hostDict = [i.domains for i in hostDict if '*' not in i.domains]
            hostDict = [i.lower() for i in reduce(lambda x, y: x+y, hostDict)]
            hostDict = [x for y, x in enumerate(hostDict) if x not in hostDict[:y]]
        except Exception:
            hostDict = []
        if name1==name_check:
            # Ask the user which media type to test when the scraper handles both.
            if 'tv' in type and 'movie' in type:
                choise=['TV','MOVIE']
                ret = xbmcgui.Dialog().select("Choose", choise)
                if ret!=-1:
                    if ret==0:
                        tv_movie='tv'
                    else:
                        tv_movie='movie'
                else:
                    sys.exit()
            elif 'tv' in type:
                tv_movie='tv'
            else:
                tv_movie='movie'
            # Hard-coded test fixtures: The Flash S04E05 for TV, Rampage (2018)
            # for movies.
            if tv_movie=='tv':
                original_title='The Flash'
                show_original_year='2014'
                season='4'
                episode='5'
                season_n='04'
                episode_n='05'
                id='60735'
                name='the flash'
                imdb_id='tt3107288'
                premiered="2018-11-13"
            else:
                original_title='Rampage'
                show_original_year='2018'
                season='%20'
                episode='00'
                season_n='00'
                episode_n='00'
                id='427641'
                imdb_id='tt2231461'
                name='rampage'
                premiered=''
            #get_links_new(items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id)
            # Last argument True marks this as a test run.
            thread.append(Thread(get_links_new,hostDict,imdb_id,name1,type,items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id,premiered,True))
            thread[len(thread)-1].setName(name1)
    for td in thread:
        td.start()
    str1=''
    str2=''
    still_alive=0
    xxx=0
    all_links_togther=[]
    start_time=time.time()
    # Poll worker threads, updating the dialog with per-resolution counts,
    # until all threads finish or the user cancels.
    while 1:
        ir={}
        for threads in thread:
            still_alive=0
            for yy in range(0,len(thread)):
                if thread[yy].is_alive():
                    ir[thread[yy].getName()]='[COLOR lightseagreen]'+(thread[yy].getName())+'[/COLOR]'
                    still_alive=1
                else:
                    ir[thread[yy].getName()]='[COLOR gold]'+(thread[yy].getName())+'[/COLOR]'
        count_rest=0
        count_1080=0
        count_720=0
        count_480=0
        f_result={}
        links_in=''
        if name_check in all_links_sources:
            if 'links' in all_links_sources[name_check]:
                for data in all_links_sources[name_check]['links']:
                    name1,links,server,res=data
                    if '1080' in res:
                        count_1080+=1
                    elif '720' in res:
                        count_720+=1
                    elif '480' in res:
                        count_480+=1
                    else:
                        count_rest+=1
        str1=' '
        string_dp="1080: %s 720: %s 480: %s Rest: %s"%(count_1080,count_720,count_480,count_rest)
        elapsed_time = time.time() - start_time
        dp.update(int(((xxx* 100.0)/(100)) ), ' Please Wait '+' - [COLOR aqua]'+tv_movie+' - [/COLOR]'+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),string_dp)
        if dp.iscanceled():
            # User cancelled: signal workers and force-stop any still alive.
            # NOTE(review): _Thread__stop is a CPython-2 private API.
            stop_all=1
            for threads in thread:
                if threads.is_alive():
                    threads._Thread__stop()
            dp.close()
        if still_alive==0:
            break
    stop_all=1
    # Emit every collected link as a playable directory entry.
    for name,link,server,res in all_links_sources[name_check]['links']:
        addLink(name,link,5,False,' ',' ','sss '+name_check+' sss '+server+' \n'+res)
def open_settings():
    """Open this add-on's settings dialog (Kodi built-in UI)."""
    Addon.openSettings()
def play_trailer_f(id,tv_movie):
    """Play a random TMDB trailer for the given item in the background.

    id       -- TMDB id of the movie/show
    tv_movie -- 'movie' selects the movie endpoint, anything else the TV one

    Returns 0 (without playing) when TMDB lists no videos. Playback is
    skipped when the global search_done flag is non-zero.
    """
    import random
    global search_done
    if tv_movie=='movie':
        url_t='http://api.themoviedb.org/3/movie/%s/videos?api_key=1248868d7003f60f2386595db98455ef'%id
    else:
        url_t='http://api.themoviedb.org/3/tv/%s/videos?api_key=1248868d7003f60f2386595db98455ef'%id
    html_t=requests.get(url_t).json()
    # Pick one of the listed videos at random.
    if len(html_t['results'])>0:
        vid_num=random.randint(0,len(html_t['results'])-1)
    else:
        return 0
    video_id=(html_t['results'][vid_num]['key'])
    from youtube_ext import get_youtube_link2
    playback_url=''
    if video_id!=None:
        try:
            playback_url= get_youtube_link2('https://www.youtube.com/watch?v='+video_id).replace(' ','%20')
        except Exception as e:
            # NOTE(review): resolution failure leaves playback_url='' and an
            # empty URL is still handed to the player below.
            pass
        #from pytube import YouTube
        #playback_url = YouTube(domain_s+'www.youtube.com/watch?v='+video_id).streams.first().download()
    if search_done==0:
        xbmc.Player().play(playback_url)
def play_trailer(id,tv_movie):
    """Resolve the first TMDB trailer for an item and hand it to Kodi.

    id       -- TMDB id of the movie/show
    tv_movie -- 'movie' selects the movie videos endpoint, anything else TV

    The resolved stream URL (possibly empty when resolution fails) is passed
    back through xbmcplugin.setResolvedUrl.
    """
    # Both branches of the original performed the same fetch; only the
    # endpoint segment differs.
    media_kind = 'movie' if tv_movie == 'movie' else 'tv'
    url_t = 'http://api.themoviedb.org/3/%s/%s/videos?api_key=1248868d7003f60f2386595db98455ef' % (media_kind, id)
    html_t = requests.get(url_t).json()
    video_id = html_t['results'][0]['key']
    from youtube_ext import get_youtube_link2
    playback_url = ''
    if video_id is not None:
        try:
            resolved = get_youtube_link2('https://www.youtube.com/watch?v=' + video_id)
            playback_url = resolved.replace(' ', '%20')
        except Exception as e:
            pass
        #from pytube import YouTube
        #playback_url = YouTube(domain_s+'www.youtube.com/watch?v='+video_id).streams.first().download()
        #playback_url = 'plugin://plugin.video.youtube/?action=play_video&videoid=%s' % video_id
    item = xbmcgui.ListItem(path=playback_url)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
def movie_recomended():
    """List movie recommendations seeded by up to five random entries from
    the user's viewing history file (fav_movie.txt).

    Shows an error dialog and returns 0 when no history exists. Each picked
    TMDB id feeds one recommendations page into get_movies (duplicates and
    malformed lines are skipped).
    """
    from random import randint
    history_path = os.path.join(user_dataDir, "fav_movie.txt")
    if not os.path.exists(history_path):
        xbmcgui.Dialog().ok('Error', 'No Viewing History....')
        return 0
    f = open(history_path, 'r')
    history = f.readlines()
    f.close()
    picked = 0
    attempts = 0
    seen_urls = []
    new_name_array = []
    while picked < 5:
        # Random pick; give up after len(history) attempts.
        movie_id = history[randint(0, len(history) - 1)]
        attempts = attempts + 1
        if attempts == len(history):
            break
        if len(movie_id) > 1 and '%' not in movie_id:
            url = domain_s + 'api.themoviedb.org/3/movie/%s/recommendations?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1' % movie_id.replace('\n', '')
            picked = picked + 1
            if url not in seen_urls:
                seen_urls.append(url)
                new_name_array = get_movies(url, 0, reco=1, new_name_array=new_name_array)
def tv_recomended():
    """List TV recommendations seeded by up to four random entries from the
    user's viewing history file (fav_tv.txt).

    Shows an error dialog and returns 0 when no history exists. Each picked
    TMDB id feeds one recommendations page into get_movies (duplicates and
    malformed lines are skipped).
    """
    from random import randint
    history_path = os.path.join(user_dataDir, "fav_tv.txt")
    if not os.path.exists(history_path):
        xbmcgui.Dialog().ok('Error', 'No Viewing History....')
        return 0
    f = open(history_path, 'r')
    history = f.readlines()
    f.close()
    picked = 0
    attempts = 0
    seen_urls = []
    while picked < 4:
        # Random pick; give up after len(history) attempts.
        show_id = history[randint(0, len(history) - 1)]
        attempts = attempts + 1
        if attempts == len(history):
            break
        if len(show_id) > 1 and '%' not in show_id:
            url = domain_s + 'api.themoviedb.org/3/tv/%s/recommendations?api_key=1248868d7003f60f2386595db98455ef&language=en&page=1' % show_id.replace('\n', '')
            picked = picked + 1
            if url not in seen_urls:
                seen_urls.append(url)
                get_movies(url, 0, reco=1)
def get_tmdb_from_imdb(imdb,html_g,xxx):
    """Resolve an IMDB id to TMDB movie metadata and record it.

    imdb   -- IMDB id (e.g. 'tt2231461')
    html_g -- pre-fetched TMDB genre list JSON (provides html_g['genres'])
    xxx    -- ordering index preserved into the result tuple

    Appends a (name, icon, fanart, plot, year, original_name, id, rating,
    genre, trailer, xxx) tuple to the global all_new_data for every movie in
    TMDB's 'find' response and returns the last such tuple.
    NOTE(review): if html['movie_results'] is empty the loop body never runs
    and the return statement raises NameError on new_name.
    """
    global all_new_data
    url=domain_s+'api.themoviedb.org/3/find/%s?api_key=1248868d7003f60f2386595db98455ef&external_source=imdb_id&language=en'%imdb
    html=requests.get(url).json()
    for data in html['movie_results']:
        if 'vote_average' in data:
            rating=data['vote_average']
        else:
            rating=0
        # Prefer first_air_date (TV), then release_date (movie), else blank.
        if 'first_air_date' in data:
            if data['first_air_date']==None:
                year=' '
            else:
                year=str(data['first_air_date'].split("-")[0])
        else:
            if 'release_date' in data:
                if data['release_date']==None:
                    year=' '
                else:
                    year=str(data['release_date'].split("-")[0])
            else:
                year=' '
        if 'overview' in data:
            if data['overview']==None:
                plot=' '
            else:
                plot=data['overview']
        else:
            plot=' '
        if 'title' not in data:
            new_name=data['name']
        else:
            new_name=data['title']
        # mode 4 = movie listing, mode 7 = TV listing elsewhere in this file.
        if 'original_title' in data:
            original_name=data['original_title']
            mode=4
            id=str(data['id'])
        else:
            original_name=data['original_name']
            id=str(data['id'])
            mode=7
        if data['poster_path']==None:
            icon=' '
        else:
            icon=data['poster_path']
        if 'backdrop_path' in data:
            if data['backdrop_path']==None:
                fan=' '
            else:
                fan=data['backdrop_path']
        else:
            # NOTE(review): falls back to the top-level 'find' response; it is
            # not evident from here that it carries 'backdrop_path' — verify.
            fan=html['backdrop_path']
        if plot==None:
            plot=' '
        # Relative TMDB image paths get the CDN prefix.
        if 'http' not in fan:
            fan=domain_s+'image.tmdb.org/t/p/original/'+fan
        if 'http' not in icon:
            icon=domain_s+'image.tmdb.org/t/p/original/'+icon
        genres_list= dict([(i['id'], i['name']) for i in html_g['genres'] \
        if i['name'] is not None])
        try:genere = u' / '.join([genres_list[x] for x in data['genre_ids']])
        except:genere=''
        trailer = "plugin://plugin.video.destinyds?mode=25&url=www&id=%s" % id
        all_new_data.append((new_name,icon,fan,plot,year,original_name,id,rating,genere,trailer,xxx))
    return new_name,icon,fan,plot,year,original_name,id,rating,genere,trailer,xxx
def latest_dvd(url):
    """Scrape dvdsreleasedates.com for recent DVD releases and list them.

    url -- page of dvdsreleasedates.com to scrape

    Extracts IMDB ids from each release-date section, resolves them to TMDB
    metadata concurrently via get_tmdb_from_imdb worker threads (results land
    in the global all_new_data), then renders the sorted results plus a link
    to the previous month's page when present.
    """
    global all_new_data
    start_time=time.time()
    if Addon.getSetting("dp")=='true':
        dp = xbmcgui.DialogProgress()
        dp.create("Loading", "Please Wait", '')
        dp.update(0)
    # The site requires browser-like headers.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
        'Host': 'www.dvdsreleasedates.com',
        'Pragma': 'no-cache',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    }
    url_g=domain_s+'api.themoviedb.org/3/genre/movie/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    html_g=requests.get(url_g).json()
    html_o=requests.get(url,headers=headers).content
    # Each match is (section header, section body).
    regex="'fieldtable-inner'.+?<a id='.+?'></a>(.+?)<(.+?)</table></td></tr>"
    match=re.compile(regex,re.DOTALL).findall(html_o)
    name_array=[]
    all_new_data=[]
    xxx=0
    thread=[]
    for dat,rest in match:
        # Section header becomes a non-playable caption row (empty metadata).
        all_new_data.append(('[COLOR aqua][I]'+dat+'[/I][/COLOR]','','','','','','','','','',xxx))
        regex="'http://www.imdb.com/title/(.+?)/'"
        match_in=re.compile(regex,re.DOTALL).findall(rest)
        for imdb in match_in:
            if Addon.getSetting("dp")=='true':
                elapsed_time = time.time() - start_time
                dp.update(int(((xxx* 100.0)/(len(match_in)*len(match))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),imdb)
            xxx=xxx+1
            if imdb not in name_array:
                # One worker thread per unique IMDB id.
                thread.append(Thread(get_tmdb_from_imdb,imdb,html_g,xxx))
                thread[len(thread)-1].setName(imdb)
    for td in thread:
        td.start()
        if Addon.getSetting("dp")=='true':
            elapsed_time = time.time() - start_time
            dp.update(0, ' Starting '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),td.name)
        #if len(thread)>38:
        # Stagger thread starts slightly.
        xbmc.sleep(255)
    # Wait until every worker finishes, refreshing the dialog as we go.
    while 1:
        still_alive=0
        all_alive=[]
        for yy in range(0,len(thread)):
            if thread[yy].is_alive():
                all_alive.append(thread[yy].name)
                still_alive=1
        if Addon.getSetting("dp")=='true':
            elapsed_time = time.time() - start_time
            dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),','.join(all_alive))
        if still_alive==0:
            break
        xbmc.sleep(100)
    # Restore original page order (index 10 is the xxx ordering counter).
    all_new_data=sorted(all_new_data, key=lambda x: x[10], reverse=False)
    for new_name,icon,fan,plot,year,original_name,id,rating,genere,trailer,xxx in all_new_data:
        if icon=='' and fan=='':
            # Caption row (no artwork collected above).
            addNolink(new_name,'www',199,False,iconimage=domain_s+'pbs.twimg.com/profile_images/421736697647218688/epigBm2J.jpeg',fanart='http://www.dream-wallpaper.com/free-wallpaper/cartoon-wallpaper/spawn-wallpaper/1280x1024/free-wallpaper-24.jpg')
        else:
            addDir3(new_name,url,4,icon,fan,plot,data=year,original_title=original_name,id=id,rating=rating,heb_name=new_name,show_original_year=year,isr=isr,generes=genere,trailer=trailer)
    # Add a single 'previous month' navigation entry if the page offers one.
    if "a class='monthlink' href='" in html_o:
        regex="<a class='monthlink' href='(.+?)' >(.+?)<"
        match=re.compile(regex).findall(html_o)
        for link,name in match:
            addDir3('[COLOR aqua][I]'+name+'[/I][/COLOR]'.decode('utf8'),domain_s+'www.dvdsreleasedates.com'+link,28,' ',' ','Older results'.decode('utf8'))
            break
    if Addon.getSetting("dp")=='true':
        dp.close()
def get_movie_data(url):
    """Fetch *url* and return the decoded JSON body (used as a cache worker)."""
    response = requests.get(url)
    return response.json()
def main_trakt():
    """Build the top-level Trakt menu (same entries, data-driven)."""
    icon = 'https://kodi.expert/wp-content/uploads/2018/05/trakt-logo.png'
    fan = 'https://seo-michael.co.uk/content/images/2016/08/trakt.jpg'
    # (title, trakt path, mode, plot) — order matches the menu layout.
    entries = [
        ('Lists', 'www', 64, 'Lists'),
        ('Progress', 'users/me/watched/shows?extended=full', 63, 'Progress'),
        ('Episode Watchlist ', 'sync/watchlist/episodes?extended=full', 63, 'Episode watchlist'),
        ('Series Watchlist', 'users/me/watchlist/episodes?extended=full', 31, 'Series watchlist'),
        ('Collection', 'users/me/collection/shows', 31, 'collection'),
        ('Shows Watchlist', 'users/me/watchlist/shows', 31, 'Shows watchlist'),
        ('Movies Watchlist', 'users/me/watchlist/movies', 31, 'Movies watchlist'),
        ('Watched Movies', 'users/me/watched/movies', 31, 'Watched movies'),
        ('Watched Shows', 'users/me/watched/shows', 31, 'Watched shows'),
    ]
    for title, path, mode, plot in entries:
        addDir3(title, path, mode, icon, fan, plot)
def get_trakt():
    """List the current user's custom Trakt lists as directory entries.

    Each entry's URL packs the username and list slug separated by the
    '$$$$$$$$$$$' marker, which get_trk_data later splits apart.
    """
    trakt_lists = call_trakt("users/me/lists")
    #trakt_lists=call_trakt('users/me/collection/shows')
    for entry in trakt_lists:
        list_name = entry["name"]
        user = entry["user"]["username"]
        slug = entry["ids"]["slug"]
        url = user + '$$$$$$$$$$$' + slug
        addDir3(list_name, url, 31, ' ', ' ', list_name)
def progress_trakt(url):
    """Render the user's Trakt 'on deck' list: for every partly-watched show,
    compute the next episode to watch and add it as a directory entry.

    url -- Trakt API path passed to call_trakt (e.g. watched/watchlist shows)

    Shows hidden from progress on Trakt are filtered out; entries whose next
    episode has not aired yet are shown red and unplayable.
    """
    if Addon.getSetting("fav_search_f_tv")=='true' and Addon.getSetting("fav_servers_en_tv")=='true' and len(Addon.getSetting("fav_servers_tv"))>0:
        fav_status='true'
    else:
        fav_status='false'
    if Addon.getSetting("dp")=='true':
        dp = xbmcgui.DialogProgress()
        dp.create("Loading Episodes", "Please Wait", '')
        dp.update(0)
    import datetime
    start_time = time.time()
    xxx=0
    # Reference 'now' shifted back 5 hours, used to drop future-dated shows.
    ddatetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
    url_g=domain_s+'api.themoviedb.org/3/genre/tv/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    html_g=requests.get(url_g).json()
    result = call_trakt(url)
    items = []
    new_name_array=[]
    # Pass 1: reduce each Trakt item to {imdb, tmdb, title, year, last
    # watched season/episode}; raise Exception() to skip invalid entries.
    for item in result:
        try:
            num_1 = 0
            if 'seasons' in item:
                # Count watched episodes (specials in season 0 excluded).
                for i in range(0, len(item['seasons'])):
                    if item['seasons'][i]['number'] > 0: num_1 += len(item['seasons'][i]['episodes'])
                num_2 = int(item['show']['aired_episodes'])
                # Fully watched shows are skipped.
                if num_1 >= num_2: raise Exception()
                season = str(item['seasons'][-1]['number'])
                episode = [x for x in item['seasons'][-1]['episodes'] if 'number' in x]
                episode = sorted(episode, key=lambda x: x['number'])
                episode = str(episode[-1]['number'])
            else:
                season = str(item['episode']['season'])
                episode=str(item['episode']['number'])
            tvshowtitle = item['show']['title']
            if tvshowtitle == None or tvshowtitle == '': raise Exception()
            tvshowtitle = replaceHTMLCodes(tvshowtitle)
            year = item['show']['year']
            year = re.sub('[^0-9]', '', str(year))
            if int(year) > int(ddatetime.strftime('%Y')): raise Exception()
            imdb = item['show']['ids']['imdb']
            if imdb == None or imdb == '': imdb = '0'
            tmdb = item['show']['ids']['tmdb']
            if tmdb == None or tmdb == '': raise Exception()
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            trakt = item['show']['ids']['trakt']
            if trakt == None or trakt == '': raise Exception()
            trakt = re.sub('[^0-9]', '', str(trakt))
            if 'last_watched_at' in item:
                last_watched = item['last_watched_at']
            else:
                last_watched = item['listed_at']
            if last_watched == None or last_watched == '': last_watched = '0'
            # NOTE(review): 'trakt' is validated above but NOT stored here, so
            # the items['trakt'] lookup in the else-branch below raises KeyError.
            items.append({'imdb': imdb, 'tmdb': tmdb, 'tvshowtitle': tvshowtitle, 'year': year, 'snum': season, 'enum': episode, '_last_watched': last_watched})
        except Exception as e:
            logging.warning(e)
    # Drop shows the user hid from their Trakt progress page.
    result = call_trakt('/users/hidden/progress_watched?limit=1000&type=show')
    result = [str(i['show']['ids']['tmdb']) for i in result]
    items_pre = [i for i in items if not i['tmdb'] in result]
    # Pass 2: resolve TMDB metadata and figure out the next episode.
    # (The loop variable shadows the 'items' list built above.)
    for items in items_pre:
        watched='no'
        not_yet=0
        gone=0
        season=items['snum']
        episode=items['enum']
        url='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(items['tmdb'],'1248868d7003f60f2386595db98455ef')
        #url='http://api.themoviedb.org/3/tv/%s/season/%s?api_key=1248868d7003f60f2386595db98455ef&language=en'%(items['tmdb'],season)
        # time_to_save is not defined in this function — presumably a module
        # global holding the cache TTL; verify against the rest of the file.
        html=cache.get(get_movie_data,time_to_save,url, table='pages')
        plot=' '
        if 'The resource you requested could not be found' not in str(html):
            data=html
            if 'vote_average' in data:
                rating=data['vote_average']
            else:
                rating=0
            if 'first_air_date' in data:
                if data['first_air_date']==None:
                    year=' '
                else:
                    year=str(data['first_air_date'].split("-")[0])
            else:
                if 'release_date' in data:
                    if data['release_date']==None:
                        year=' '
                    else:
                        year=str(data['release_date'].split("-")[0])
                else:
                    year=' '
            if 'overview' in data:
                if data['overview']==None:
                    plot=' '
                else:
                    plot=data['overview']
            else:
                plot=' '
            if 'title' not in data:
                new_name=data['name']
            else:
                new_name=data['title']
            f_subs=[]
            original_name=data['original_name']
            id=str(data['id'])
            mode=4
            if data['poster_path']==None:
                icon=' '
            else:
                icon=data['poster_path']
            if 'backdrop_path' in data:
                if data['backdrop_path']==None:
                    fan=' '
                else:
                    fan=data['backdrop_path']
            else:
                fan=html['backdrop_path']
            if plot==None:
                plot=' '
            if 'http' not in fan:
                fan=domain_s+'image.tmdb.org/t/p/original/'+fan
            if 'http' not in icon:
                icon=domain_s+'image.tmdb.org/t/p/original/'+icon
            genres_list= dict([(i['id'], i['name']) for i in html_g['genres'] \
            if i['name'] is not None])
            try:genere = u' / '.join([genres_list[x['id']] for x in data['genres']])
            except:genere=''
            trailer = "plugin://plugin.video.destinyds?mode=25&url=www&id=%s" % id
            # Process each show only once.
            if new_name not in new_name_array:
                new_name_array.append(new_name)
                color='white'
                elapsed_time = time.time() - start_time
                if Addon.getSetting("dp")=='true':
                    # NOTE(review): '[COLOR'+color produces '[COLORwhite]'
                    # (missing space) — malformed Kodi label markup.
                    dp.update(int(((xxx* 100.0)/(len(html))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'[COLOR'+color+']'+new_name+'[/COLOR]')
                xxx=xxx+1
                # Advance (season, episode) past the last watched episode,
                # using TMDB's last/next aired episode markers.
                if int(data['last_episode_to_air']['season_number'])>=int(season):
                    if int(data['last_episode_to_air']['episode_number'])>int(episode):
                        episode=str(int(episode)+1)
                    else:
                        if int(data['last_episode_to_air']['season_number'])>int(season):
                            season=str(int(season)+1)
                            episode='1'
                        else:
                            if (data['next_episode_to_air'])!=None:
                                # Next episode announced but not aired yet.
                                season=str(int(season)+1)
                                episode='1'
                                not_yet='1'
                            else:
                                gone=1
                else:
                    if (data['next_episode_to_air'])!=None:
                        season=str(int(season)+1)
                        episode='1'
                        not_yet='1'
                    else:
                        gone=1
                video_data={}
                video_data['mediatype']='tvshow'
                video_data['OriginalTitle']=new_name
                video_data['title']=new_name
                video_data['year']=year
                video_data['season']=season
                video_data['episode']=episode
                video_data['genre']=genere
                # Zero-pad season/episode for display (e.g. S04E05).
                if len(episode)==1:
                    episode_n="0"+episode
                else:
                    episode_n=episode
                if len(season)==1:
                    season_n="0"+season
                else:
                    season_n=season
                if Addon.getSetting("trac_trk")=='true':
                    addon='\n'+' Season'+season_n+'-Episode '+episode_n
                else:
                    addon=''
                video_data['plot']=plot+addon
                try:
                    max_ep=data['seasons'][int(season)-1]['episode_count']
                except Exception as e:
                    max_ep=100
                if gone==0:
                    if not_yet==0:
                        # dates encodes (prev, current, next) availability flags.
                        if episode_n=='01':
                            dates=json.dumps((0,'' ,''))
                        elif max_ep<=int(episode):
                            dates=json.dumps(('','' ,0))
                        else:
                            dates=json.dumps(('','' ,''))
                        addDir3('[COLOR '+color+']'+new_name+'[/COLOR]'+' S'+season_n+'E'+episode_n,url,mode,icon,fan,plot+addon,data=year,original_title=original_name,id=id,rating=rating,heb_name=new_name,show_original_year=year,isr=isr,generes=genere,trailer=trailer,watched=watched,season=season,episode=episode,eng_name=original_title,tmdbid=id,video_info=video_data,dates=dates,fav_status=fav_status)
                    else:
                        # Next episode hasn't aired yet: non-playable red entry.
                        addNolink('[COLOR red][I]'+ new_name.encode('utf8')+'[/I][/COLOR]'+' S'+season_n+'E'+episode_n, 'www',999,False,iconimage=icon,fanart=fan)
        else:
            # TMDB lookup failed — fall back to the Trakt title.
            # NOTE(review): items has no 'trakt' key (see items.append above),
            # so this branch raises KeyError as written.
            responce=call_trakt("shows/{0}".format(items['trakt']), params={'extended': 'full'})
            addNolink('[COLOR red][I]'+ responce['title']+'[/I][/COLOR]', 'www',999,False)
    if Addon.getSetting("dp")=='true':
        dp.close()
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
def get_trk_data(url,page):
    """Render a Trakt list (custom list or API path) as a paged directory.

    url  -- either 'user$$$$$$$$$$$slug' for a custom list, or a Trakt API
            path passed straight to call_trakt
    page -- page number; anything non-numeric falls back to 1

    Items are resolved to TMDB metadata (cached), movies are cross-checked
    against the user's watched list, and a 'Next Page' entry is appended
    while more pages exist.
    """
    o_url=url
    try:
        a=int(page)
    except:
        page=1
    time_to_save=int(Addon.getSetting("save_time"))
    xxx=0
    if Addon.getSetting("dp")=='true':
        dp = xbmcgui.DialogProgress()
        dp.create("Loading", "Please Wait", '')
        dp.update(0)
    url_g_m=domain_s+'api.themoviedb.org/3/genre/movie/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    url_g_tv=domain_s+'api.themoviedb.org/3/genre/tv/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    html_g_tv=requests.get(url_g_tv).json()
    html_g_m=requests.get(url_g_m).json()
    start_time = time.time()
    src="tmdb"
    # Watched-movies TMDB ids, used to flag entries as watched below.
    i,pages = (call_trakt('/users/me/watched/movies',pagination=True,page=page))
    logging.warning('Pages:'+str(pages))
    all_movie_w=[]
    for ids in i:
        all_movie_w.append(str(ids['movie']['ids']['tmdb']))
    # Custom lists are encoded as 'user$$$$$$$$$$$slug' (see get_trakt).
    if '$$$$$$$$$$$' in url:
        data_in=url.split('$$$$$$$$$$$')
        user = data_in[0]
        slug = data_in[1]
        selected={'slug':data_in[1],'user':data_in[0]}
        responce=call_trakt("/users/{0}/lists/{1}/items".format(user, slug))
    else:
        responce=call_trakt(url)
    new_name_array=[]
    for items in responce:
        # Items carrying a 'show' payload are TV, otherwise movies.
        if 'show' in items:
            slug = 'tv'
            html_g=html_g_tv
        else:
            slug = 'movies'
            html_g=html_g_m
        if slug=='movies':
            url='http://api.themoviedb.org/3/movie/%s?api_key=%s&language=en&append_to_response=external_ids'%(items['movie']['ids']['tmdb'],'1248868d7003f60f2386595db98455ef')
        else:
            url='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(items['show']['ids']['tmdb'],'1248868d7003f60f2386595db98455ef')
        html=cache.get(get_movie_data,time_to_save,url, table='pages')
        if 'The resource you requested could not be found' not in str(html):
            data=html
            if 'vote_average' in data:
                rating=data['vote_average']
            else:
                rating=0
            if 'first_air_date' in data:
                if data['first_air_date']==None:
                    year=' '
                else:
                    year=str(data['first_air_date'].split("-")[0])
            else:
                if 'release_date' in data:
                    if data['release_date']==None:
                        year=' '
                    else:
                        year=str(data['release_date'].split("-")[0])
                else:
                    year=' '
            if 'overview' in data:
                if data['overview']==None:
                    plot=' '
                else:
                    plot=data['overview']
            else:
                plot=' '
            if 'title' not in data:
                new_name=data['name']
            else:
                new_name=data['title']
            f_subs=[]
            # mode 4 = movie listing, mode 7 = TV listing elsewhere in this file.
            if slug=='movies':
                original_name=data['original_title']
                mode=4
                id=str(data['id'])
            else:
                original_name=data['original_name']
                id=str(data['id'])
                mode=7
            if data['poster_path']==None:
                icon=' '
            else:
                icon=data['poster_path']
            if 'backdrop_path' in data:
                if data['backdrop_path']==None:
                    fan=' '
                else:
                    fan=data['backdrop_path']
            else:
                fan=html['backdrop_path']
            if plot==None:
                plot=' '
            if 'http' not in fan:
                fan=domain_s+'image.tmdb.org/t/p/original/'+fan
            if 'http' not in icon:
                icon=domain_s+'image.tmdb.org/t/p/original/'+icon
            genres_list= dict([(i['id'], i['name']) for i in html_g['genres'] \
            if i['name'] is not None])
            try:genere = u' / '.join([genres_list[x['id']] for x in data['genres']])
            except:genere=''
            trailer = "plugin://plugin.video.destinyds?mode=25&url=www&id=%s" % id
            # Process each title only once per listing.
            if new_name not in new_name_array:
                new_name_array.append(new_name)
                color='white'
                elapsed_time = time.time() - start_time
                if Addon.getSetting("dp")=='true':
                    # NOTE(review): '[COLOR'+color yields '[COLORwhite]'
                    # (missing space) — malformed Kodi label markup.
                    dp.update(int(((xxx* 100.0)/(len(html))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'[COLOR'+color+']'+new_name+'[/COLOR]')
                xxx=xxx+1
                watched='no'
                if id in all_movie_w:
                    watched='yes'
                '''
                if id in all_tv_w:
                    if season+'x'+episode in all_tv_w[id]:
                        watched='yes'
                '''
                # Per-type favourite-server settings drive fav_status.
                if slug=='movies':
                    fav_search_f=Addon.getSetting("fav_search_f")
                    fav_servers_en=Addon.getSetting("fav_servers_en")
                    fav_servers=Addon.getSetting("fav_servers")
                else:
                    fav_search_f=Addon.getSetting("fav_search_f_tv")
                    fav_servers_en=Addon.getSetting("fav_servers_en_tv")
                    fav_servers=Addon.getSetting("fav_servers_tv")
                if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0 ):
                    fav_status='true'
                else:
                    fav_status='false'
                addDir3('[COLOR '+color+']'+new_name+'[/COLOR]',url,mode,icon,fan,plot,data=year,original_title=original_name,id=id,rating=rating,heb_name=new_name,show_original_year=year,isr=isr,generes=genere,trailer=trailer,watched=watched,fav_status=fav_status)
        else:
            # TMDB lookup failed — fall back to the Trakt title.
            # (Rebinds 'responce'; the loop keeps iterating the original
            # sequence since the iterator was already captured.)
            if slug=='movies':
                responce=call_trakt("movies/{0}".format(items['movie']['ids']['trakt']), params={'extended': 'full'})
            else:
                responce=call_trakt("shows/{0}".format(items['show']['ids']['trakt']), params={'extended': 'full'})
            addNolink('[COLOR red][I]'+ responce['title']+'[/I][/COLOR]', 'www',999,False)
    if Addon.getSetting("dp")=='true':
        dp.close()
    if int(page)<int(pages):
        addDir3('[COLOR aqua][I]Next Page[/COLOR][/I]',o_url,31,iconImage,fanart,'[COLOR aqua][I]Next Page[/COLOR][/I]',data=str(int(page)+1))
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
def get_one_trk(color,name,url_o,url,icon,fanart,data_ep,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image):
    """Resolve one tracked show's episode metadata from TMDB (thread worker).

    Fetches the season's episode list, derives the episode plot/still,
    previous/current/next air dates and a status color, appends the full
    tuple to the global all_data_imdb (xxx preserves ordering), and returns
    (data_ep, dates, fanart, color, next).

    Color code: gold = unaired episodes remain this season, white = caught
    up, lightblue = later seasons exist, peru = show ended/cancelled,
    green = lookup failed.
    """
    global all_data_imdb
    # Imported for its side effect: pre-loads strptime to avoid a known
    # threading import race in time.strptime.
    import _strptime
    data_ep=''
    dates=' '
    fanart=image
    url=domain_s+'api.themoviedb.org/3/tv/%s/season/%s?api_key=1248868d7003f60f2386595db98455ef&language=en'%(id,season)
    html=requests.get(url).json()
    # Shadows the builtin 'next' (kept as-is).
    next=''
    ep=0
    f_episode=0
    catch=0
    counter=0
    # Find the index of the first episode airing after today (f_episode).
    if 'episodes' in html:
        for items in html['episodes']:
            if 'air_date' in items:
                try:
                    datea=items['air_date']+'\n'
                    a=(time.strptime(items['air_date'], '%Y-%m-%d'))
                    b=time.strptime(str(time.strftime('%Y-%m-%d')), '%Y-%m-%d')
                    if a>b:
                        if catch==0:
                            f_episode=counter
                            catch=1
                    counter=counter+1
                except:
                    ep=0
            else:
                ep=0
    # Episode numbers are 1-based; TMDB's episode list is 0-based.
    episode_fixed=int(episode)-1
    try:
        plot=html['episodes'][int(episode_fixed)]['overview']
        ep=len(html['episodes'])
        if (html['episodes'][int(episode_fixed)]['still_path'])==None:
            fanart=image
        else:
            fanart=domain_s+'image.tmdb.org/t/p/original/'+html['episodes'][int(episode_fixed)]['still_path']
        if f_episode==0:
            f_episode=ep
        data_ep='[COLOR aqua]'+'Season '+season+'-Episode '+episode+ '[/COLOR]\n[COLOR gold] out of ' +str(f_episode) +' Episode for this Season[/COLOR]\n'
        if int(episode)>1:
            prev_ep=time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)-1]['air_date'], '%Y-%m-%d')))
        else:
            prev_ep=0
        if int(episode)<ep:
            # Magenta marks a next episode that is at/after the first unaired one.
            if (int(episode)+1)>=f_episode:
                color_ep='magenta'
                next_ep='[COLOR %s]'%color_ep+time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)+1]['air_date'], '%Y-%m-%d'))) +'[/COLOR]'
            else:
                next_ep=time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)+1]['air_date'], '%Y-%m-%d')))
        else:
            next_ep=0
        dates=((prev_ep,time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)]['air_date'], '%Y-%m-%d'))) ,next_ep))
        if int(episode)<int(f_episode):
            color='gold'
        else:
            color='white'
        # Show-level status drives the remaining color overrides.
        h2=requests.get('https://api.themoviedb.org/3/tv/%s?api_key=1248868d7003f60f2386595db98455ef&language=en-US'%id).json()
        last_s_to_air=int(h2['last_episode_to_air']['season_number'])
        last_e_to_air=int(h2['last_episode_to_air']['episode_number'])
        if int(season)<last_s_to_air:
            color='lightblue'
        if h2['status']=='Ended' or h2['status']=='Cancelled':
            color='peru'
        if h2['next_episode_to_air']!=None:
            if 'air_date' in h2['next_episode_to_air']:
                a=(time.strptime(h2['next_episode_to_air']['air_date'], '%Y-%m-%d'))
                next=time.strftime( "%d-%m-%Y",a)
        else:
            next=''
    except Exception as e:
        # Any lookup/parse failure: fall back to blanks and color 'green'.
        logging.warning('Error :'+ heb_name)
        logging.warning('Error :'+ str(e))
        plot=' '
        color='green'
        if f_episode==0:
            f_episode=ep
        data_ep='[COLOR aqua]'+'Season '+season+'-Episode '+episode+ '[/COLOR]\n[COLOR gold] out of ' +str(f_episode) +' Episode for this Season [/COLOR]\n'
        dates=' '
        fanart=image
    try:
        f_name=urllib.unquote_plus(heb_name.encode('utf8'))
    except:
        f_name=name
    if (heb_name)=='':
        f_name=name
    if color=='peru':
        add_p='[COLOR peru][B]This Show was Over or Cancelled[/B][/COLOR]'+'\n'
    else:
        add_p=''
    add_n=''
    if color=='white' and url_o=='tv' :
        if next !='':
            add_n='[COLOR tomato][I]Next Episode at ' +next+'[/I][/COLOR]\n'
        else:
            add_n='[COLOR tomato][I]Next Episode at ' +' Unknown '+'[/I][/COLOR]\n'
    all_data_imdb.append((color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx))
    return data_ep,dates,fanart,color,next
def get_Series_trk_data(url_o,match):
    """Refresh the series-tracker cache table (AllData4) for tracked shows.

    url_o -- unused here beyond the caller's context
    match -- iterable of 15-tuples (name, url, icon, image, plot, year,
             original_title, season, episode, id, eng_name,
             show_original_year, heb_name, isr, tv_movie)

    For each show, queries TMDB for the season's episodes, computes episode
    dates/color/plot (same scheme as get_one_trk), and rewrites one row per
    show into the local sqlite DB cache_play_trk.db. Returns 0.
    """
    # Imported for its side effect: pre-loads strptime to avoid a known
    # threading import race in time.strptime.
    import _strptime
    cacheFile_trk = os.path.join(user_dataDir, 'cache_play_trk.db')
    dbcon_trk2 = database.connect(cacheFile_trk)
    dbcur_trk2 = dbcon_trk2.cursor()
    dbcur_trk2.execute("CREATE TABLE IF NOT EXISTS %s ( ""data_ep TEXT, ""dates TEXT, ""fanart TEXT,""color TEXT,""id TEXT,""season TEXT,""episode TEXT, ""next TEXT,""plot TEXT);" % 'AllData4')
    dbcon_trk2.commit()
    # Table is fully rebuilt on every run.
    dbcur_trk2.execute("DELETE FROM AllData4")
    image=' '
    for item in match:
        # Shadows the builtin 'next' (kept as-is).
        next=''
        name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,tv_movie=item
        #name,id,season,episode=item
        data_ep=''
        dates=' '
        fanart=image
        url=domain_s+'api.themoviedb.org/3/tv/%s/season/%s?api_key=1248868d7003f60f2386595db98455ef&language=en'%(id,season)
        html=requests.get(url).json()
        # On a transient TMDB error (any status other than 'not found'),
        # wait 10s and retry once.
        if 'status_message' in html:
            if html['status_message']!='The resource you requested could not be found.':
                xbmc.sleep(10000)
                html=requests.get(url).json()
        ep=0
        f_episode=0
        catch=0
        counter=0
        # Find the index of the first episode airing after today (f_episode).
        if 'episodes' in html:
            for items in html['episodes']:
                if 'air_date' in items:
                    try:
                        datea=items['air_date']+'\n'
                        a=(time.strptime(items['air_date'], '%Y-%m-%d'))
                        b=time.strptime(str(time.strftime('%Y-%m-%d')), '%Y-%m-%d')
                        if a>b:
                            if catch==0:
                                f_episode=counter
                                catch=1
                        counter=counter+1
                    except:
                        ep=0
                else:
                    ep=0
        # Episode numbers are 1-based; TMDB's episode list is 0-based.
        episode_fixed=int(episode)-1
        try:
            try:
                plot=html['episodes'][int(episode_fixed)]['overview']
            except:
                logging.warning(name.decode('utf-8'))
                if 'episodes' not in html:
                    logging.warning(html)
                logging.warning(episode_fixed)
                plot=''
                pass
            ep=len(html['episodes'])
            if (html['episodes'][int(episode_fixed)]['still_path'])==None:
                fanart=image
            else:
                fanart=domain_s+'image.tmdb.org/t/p/original/'+html['episodes'][int(episode_fixed)]['still_path']
            if f_episode==0:
                f_episode=ep
            data_ep='[COLOR aqua]'+'Season '+season+'-Episode '+episode+ '[/COLOR]\n[COLOR gold] out of ' +str(f_episode) +" Season's Episodes [/COLOR]\n"
            if int(episode)>1:
                prev_ep=time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)-1]['air_date'], '%Y-%m-%d')))
            else:
                prev_ep=0
            try:
                if int(episode)<ep:
                    # Magenta marks a next episode at/after the first unaired one.
                    if (int(episode)+1)>=f_episode:
                        color_ep='magenta'
                        next_ep='[COLOR %s]'%color_ep+time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)+1]['air_date'], '%Y-%m-%d'))) +'[/COLOR]'
                    else:
                        next_ep=time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)+1]['air_date'], '%Y-%m-%d')))
                else:
                    next_ep=0
            except:
                next_ep=0
            dates=((prev_ep,time.strftime( "%d-%m-%Y",(time.strptime(html['episodes'][int(episode_fixed)]['air_date'], '%Y-%m-%d'))) ,next_ep))
            if int(episode)<int(f_episode):
                color='gold'
            else:
                color='white'
            # Show-level status drives the remaining color overrides.
            h2=requests.get('https://api.themoviedb.org/3/tv/%s?api_key=1248868d7003f60f2386595db98455ef&language=en-US'%id).json()
            last_s_to_air=int(h2['last_episode_to_air']['season_number'])
            last_e_to_air=int(h2['last_episode_to_air']['episode_number'])
            if int(season)<last_s_to_air:
                color='lightblue'
            if h2['status']=='Ended' or h2['status']=='Cancelled':
                color='peru'
            if h2['next_episode_to_air']!=None:
                if 'air_date' in h2['next_episode_to_air']:
                    a=(time.strptime(h2['next_episode_to_air']['air_date'], '%Y-%m-%d'))
                    next=time.strftime( "%d-%m-%Y",a)
            else:
                next=''
        except Exception as e:
            # Log failure location, then fall back to blanks / color 'green'.
            import linecache
            exc_type, exc_obj, tb = sys.exc_info()
            f = tb.tb_frame
            lineno = tb.tb_lineno
            filename = f.f_code.co_filename
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            logging.warning('ERROR IN Series Tracker:'+str(lineno))
            logging.warning('inline:'+line)
            logging.warning(e)
            logging.warning('BAD Series Tracker')
            plot=' '
            color='green'
            if f_episode==0:
                f_episode=ep
            data_ep='[COLOR aqua]'+'Season '+season+'-Episode '+episode+ '[/COLOR]\n[COLOR gold] out of ' +str(f_episode) +" Season's Episodes [/COLOR]\n"
            dates=' '
            fanart=image
        # NOTE(review): SQL is built with % string formatting; single quotes
        # are only worked around via replace("'","%27") — prefer parameterized
        # queries (execute with '?' placeholders) for this INSERT.
        dbcon_trk2.execute("INSERT INTO AllData4 Values ('%s', '%s', '%s', '%s','%s', '%s', '%s','%s','%s');" % (data_ep.replace("'","%27"),json.dumps(dates),fanart.replace("'","%27"),color,id,season,episode,next,plot.replace("'","%27")))
        dbcon_trk2.commit()
    dbcon_trk2.close()
    logging.warning('TRD SUCE')
    return 0
def check_next_last_tv_subs(original_title,name,season,episode,show_original_year,id):
    """Return the shared next-episode subtitle lookup result.

    Thread worker target (see last_viewed): the actual lookup result is
    stored in the module-level global ``susb_data_next``; this just hands
    it back.  All parameters are kept for interface compatibility with the
    Thread(...) call sites but are not read here — the previous
    season/episode zero-padding computed from them was dead code and has
    been removed.
    """
    global susb_data_next
    return susb_data_next
def check_last_tv_subs(original_title,name,season,episode,show_original_year,id):
    """Return the shared current-episode subtitle lookup result.

    Thread worker target (see last_viewed): the lookup result lives in the
    module-level global ``susb_data``; this just hands it back.  Parameters
    are kept only so the Thread(...) call sites keep working — the
    season/episode zero-padding previously computed here was never used and
    has been removed.
    """
    global susb_data
    return susb_data
def last_viewed(url_o,isr=' '):
    """Build the Kodi "last viewed" directory for TV shows or movies.

    For ``url_o == 'tv'`` rows come from the ``Lastepisode`` table and each
    entry is enriched with tracker data (cached in ``AllData4`` or fetched by
    ``get_one_trk`` worker threads): air dates, a status colour and pending
    subtitle checks.  For any other value rows come from ``AllData`` (watched
    movies) and are listed without tracker enrichment.

    Colour legend (set by get_one_trk): gold = newer episode exists,
    lightblue = at least a season behind, white = up to date,
    peru = show ended/cancelled, green = lookup error.

    Returns [(url_o, rows)] for the tv branch, otherwise an empty list.
    Side effects: emits directory items via addDir3(), drives progress
    dialogs, closes the tracker DB connection.
    """
    global all_data_imdb
    global susb_data,susb_data_next
    import datetime
    strptime = datetime.datetime.strptime
    start_time=time.time()
    # Progress dialog only exists when the "dp_play" setting is enabled.
    if Addon.getSetting("dp_play")=='true':
        dp = xbmcgui.DialogProgress()
        dp.create("Collecting", "Please Wait", '')
        elapsed_time = time.time() - start_time
        dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Collecting', '')
    color='white'
    if url_o=='tv':
        dbcur.execute("SELECT * FROM Lastepisode WHERE type='tv' ")
    else:
        dbcur.execute("SELECT * FROM AllData WHERE type='movie'")
    match_tv = dbcur.fetchall()
    xxx=0
    all_data_imdb=[]
    thread=[]
    # Pass 1: for every stored row, either reuse the cached tracker record
    # from AllData4 or queue a get_one_trk worker thread to (re)build it.
    for item in match_tv:
        name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,tv_movie=item
        logging.warning('isr2:'+isr)
        dates=' '
        next=''
        data_ep=''
        fanart=image
        if Addon.getSetting("dp_play")=='true' :
            dp.update(int(((xxx* 100.0)/(len(match_tv))) ), ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Collection', clean_name(original_title,1))
        xxx+=1
        done_data=0
        if url_o=='tv' :
            try:
                dbcur_trk.execute("SELECT * FROM AllData4 WHERE id='%s' AND season='%s' AND episode='%s'"%(id,season,episode))
                match2 = dbcur_trk.fetchone()
                if match2!=None:
                    data_ep,dates,fanart,color,i,j,k,next,plot=match2
                    dates=json.loads(dates)
                    # 'white' (up to date) entries are refreshed anyway so a
                    # newly-aired episode is detected.
                    if color=='white' :
                        thread.append(Thread(get_one_trk,color,name,url_o,url,icon,fanart,data_ep,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image))
                        thread[len(thread)-1].setName(clean_name(original_title,1))
                        done_data=1
                    #data_ep,dates,fanart,color,next=get_one_trk(color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image)
                else:
                    thread.append(Thread(get_one_trk,color,name,url_o,url,icon,fanart,data_ep,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image))
                    thread[len(thread)-1].setName(clean_name(original_title,1))
                    done_data=1
                    #data_ep,dates,fanart,color,next=get_one_trk(color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image)
            except:
                thread.append(Thread(get_one_trk,color,name,url_o,url,icon,fanart,data_ep,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image))
                thread[len(thread)-1].setName(clean_name(original_title,1))
                done_data=1
                #data_ep,dates,fanart,color,next=get_one_trk(color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,image)
        # Cache hit (or movie row): append directly; workers append their own
        # results to the global all_data_imdb instead.
        if done_data==0:
            try:
                f_name=urllib.unquote_plus(heb_name.encode('utf8'))
            except:
                f_name=name
            if (heb_name)=='':
                f_name=name
            if color=='peru':
                add_p='[COLOR peru][B]This Show was Over or Cancelled[/B][/COLOR]'+'\n'
            else:
                add_p=''
            add_n=''
            if color=='white' and url_o=='tv' :
                if next !='':
                    add_n='[COLOR tomato][I]Next Episode at ' +next+'[/I][/COLOR]\n'
                else:
                    add_n='[COLOR tomato][I]Next Episode at ' +' Unknown '+'[/I][/COLOR]\n'
            all_data_imdb.append((color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx))
        # Start and join the queued worker(s) for this row.
        # NOTE(review): dp is referenced below under the "dp" setting but only
        # created under "dp_play" — NameError if they differ; confirm settings.
        for td in thread:
            td.start()
            if Addon.getSetting("dp")=='true':
                elapsed_time = time.time() - start_time
                dp.update(0, ' Starting '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),td.name," ")
            if len(thread)>38:
                xbmc.sleep(255)
            else:
                xbmc.sleep(10)
        while 1:
            still_alive=0
            all_alive=[]
            for yy in range(0,len(thread)):
                if thread[yy].is_alive():
                    still_alive=1
                    all_alive.append(thread[yy].name)
            if still_alive==0:
                break
            if Addon.getSetting("dp")=='true' :
                elapsed_time = time.time() - start_time
                dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),','.join(all_alive)," ")
            xbmc.sleep(100)
            if Addon.getSetting("dp")=='true' :
                if dp.iscanceled():
                    dp.close()
                    break
        thread=[]
    # Pass 2 (tv only): collect already-downloaded subtitles so we only queue
    # subtitle lookups for episodes that lack them.
    if url_o=='tv':
        dbcur.execute("SELECT * FROM subs")
        match = dbcur.fetchall()
        all_subs_db=[]
        for title,id,season,episode in match:
            # Zero-pad season/episode to build the "Title-id-SS-EE" key.
            if len(episode)==1:
                episode_n="0"+episode
            else:
                episode_n=episode
            if len(season)==1:
                season_n="0"+season
            else:
                season_n=season
            next_ep=str(int(episode_n)+1)
            if len(next_ep)==1:
                next_ep_n="0"+next_ep
            else:
                next_ep_n=next_ep
            sub_title=title.replace("%27","'")+'-'+id+'-'+season_n+'-'+episode_n
            all_subs_db.append(sub_title)
        for color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx in all_data_imdb:
            if len(episode)==1:
                episode_n="0"+episode
            else:
                episode_n=episode
            if len(season)==1:
                season_n="0"+season
            else:
                season_n=season
            next_ep=str(int(episode_n)+1)
            if len(next_ep)==1:
                next_ep_n="0"+next_ep
            else:
                next_ep_n=next_ep
            sub_title=original_title.replace("%27","'")+'-'+id+'-'+season_n+'-'+episode_n
            sub_title_next=original_title.replace("%27","'")+'-'+id+'-'+season_n+'-'+next_ep_n
            if (color=='gold' or color=='white') :
                if sub_title not in all_subs_db:
                    thread.append(Thread(check_last_tv_subs,original_title,heb_name,season,episode,show_original_year,id))
                    thread[len(thread)-1].setName(eng_name+' '+episode)
                if color=='gold' and sub_title_next not in all_subs_db:
                    thread.append(Thread(check_next_last_tv_subs,original_title,heb_name,season,str(int(episode)+1),show_original_year,id))
                    thread[len(thread)-1].setName(eng_name+' '+str(int(episode)+1))
    susb_data={}
    susb_data_next={}
    # Run the subtitle-check workers and wait for all of them.
    if url_o=='tv' :
        for td in thread:
            td.start()
            if Addon.getSetting("dp")=='true' :
                elapsed_time = time.time() - start_time
                dp.update(0, ' Starting '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),td.name," ")
        while 1:
            still_alive=0
            all_alive=[]
            for yy in range(0,len(thread)):
                if thread[yy].is_alive():
                    still_alive=1
                    all_alive.append(thread[yy].name)
            if still_alive==0:
                break
            xbmc.sleep(100)
            if Addon.getSetting("dp")=='true' :
                if dp.iscanceled():
                    dp.close()
                    break
    # Sort by insertion index (x[19]) then assign a sort "level" per colour so
    # shows with new content float to the top when order_latest is enabled.
    all_data_imdb=sorted(all_data_imdb, key=lambda x: x[19], reverse=False)
    all_o_data=[]
    level=0
    for color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx in all_data_imdb:
        if url_o=='tv':
            if color=='gold':
                level=1
            elif color=='lightblue':
                level=2
            elif color=='green':
                level=3
            elif color=='white':
                level=4
            elif color=='peru':
                level=5
        else:
            level+=1
        all_o_data.append((color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,level))
    if url_o=='tv':
        order=False
    else:
        order=True
    if Addon.getSetting("order_latest")=='true':
        all_o_data=sorted(all_o_data, key=lambda x: x[20], reverse=order)
    # Emit the final directory items.
    for color,f_name,url,icon,fanart,add_p,data_ep,add_n,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,xxx,pos in all_o_data:
        if len(episode)==1:
            episode_n="0"+episode
        else:
            episode_n=episode
        if len(season)==1:
            season_n="0"+season
        else:
            season_n=season
        next_ep=str(int(episode_n)+1)
        if len(next_ep)==1:
            next_ep_n="0"+next_ep
        else:
            next_ep_n=next_ep
        sub_title=original_title.replace("%27","'")+'-'+id+'-'+season_n+'-'+episode_n
        sub_title_next=original_title.replace("%27","'")+'-'+id+'-'+season_n+'-'+next_ep_n
        all_d=((dates))
        # dates is (prev, aired, next) from the tracker; ' ' when unavailable.
        if color!='white' and len(all_d)>1:
            add_n='[COLOR aqua]'+' Aired at '+all_d[1] + '[/COLOR]\n'
        plot=plot.replace('%27',"'")
        addDir3('[COLOR %s]'%color+ f_name+'[/COLOR]', url,4, icon,fanart,add_p+data_ep+add_n+plot,data=year,original_title=original_title,id=id,season=season,episode=episode,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=json.dumps(dates))
    dbcur_trk.close()
    dbcon_trk.close()
    read_data2=[]
    if url_o=='tv' :
        read_data2.append((url_o,match_tv))
    if Addon.getSetting("dp_play")=='true' :
        dp.close()
    return read_data2
def last_viewed_tvshows(url_o):
    """List last-viewed TV shows (or watched movies) as Kodi directory items.

    Rows come from ``Lastepisode`` when url_o == 'tv', otherwise from
    ``AllData``.  Entries are shown newest-first; rows flagged as
    'Direct link' become playable links instead of folders.
    """
    if url_o=='tv':
        dbcur.execute("SELECT * FROM Lastepisode WHERE type='tv' ")
    else:
        dbcur.execute("SELECT * FROM AllData WHERE type='movie'")
    rows = dbcur.fetchall()
    collected=[]
    # Position index (starting at 1) doubles as the recency sort key.
    for position, row in enumerate(rows, 1):
        name,url,icon,image,plot,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,tv_movie=row
        try:
            display_name=urllib.unquote_plus(name.encode('utf8'))
        except:
            display_name=name
        if heb_name=='':
            display_name=name
        collected.append(('white',display_name,url,icon,image,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,' ',position))
    collected.sort(key=lambda entry: entry[16], reverse=True)
    n_mode=7 if url_o=='tv' else 4
    for color,f_name,url,icon,fanart,plot,year,original_title,id,season,episode,eng_name,show_original_year,heb_name,isr,dates,pos in collected:
        if heb_name=='Direct link':
            video_data={'title':f_name,'year':year,'plot':plot}
            addLink(f_name,url,5,False,iconimage=icon,fanart=fanart,description=plot,data=year,id=id,video_info=json.dumps(video_data))
        else:
            addDir3('[COLOR %s]'%color+ f_name.encode('utf8')+'[/COLOR]', url,n_mode, icon,fanart,plot,data=year,original_title=original_title,id=id,season=season,episode=episode,eng_name=eng_name,show_original_year=show_original_year,heb_name=heb_name,isr=isr,dates=json.dumps(dates))
def scan_direct_links(next):
    """Benchmark direct-download servers discovered on filepursuit.com.

    Crawls the discovery listing page by page (recursing via the "Next"
    link), times a GET against each listed server and upserts the result
    (seconds as a string, or 'TIMEOUT') into the local servers.db SQLite
    database.

    Args:
        next: 'www' for the first page, otherwise the startrow offset string.

    Returns:
        0 when the user cancels the progress dialog, otherwise None.
    """
    from timeit import default_timer as timer
    servers_db=os.path.join(__PLUGIN_PATH__, "resources","servers.db")
    dp = xbmcgui.DialogProgress()
    dp.create("Scaning", "Please Wait", '','')
    dp.update(0)
    dbconserver = database.connect(servers_db)
    dbcurserver = dbconserver.cursor()
    dbcurserver.execute("CREATE TABLE IF NOT EXISTS %s ( ""name TEXT, ""speed TEXT);" % 'servers')
    dbcurserver.execute("VACUUM 'AllData';")
    dbcurserver.execute("PRAGMA auto_vacuum;")
    dbcurserver.execute("PRAGMA JOURNAL_MODE=MEMORY ;")
    dbcurserver.execute("PRAGMA temp_store=MEMORY ;")
    dbconserver.commit()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Linux; U; Android 4.4.2; zh-CN; HUAWEI MT7-TL00 Build/HuaweiMT7-TL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.3.8.909 Mobile Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    headers2 = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0'}
    if next=='www':
        html=requests.get(domain_s+'filepursuit.com/discover.php',headers=headers).content
    else:
        html=requests.get(domain_s+'filepursuit.com/discover.php?startrow='+next,headers=headers).content
    regex="discover.php\?link=(.+?)'>(.+?)<"
    match_all=re.compile(regex).findall(html)
    f_time_avg=0
    xxx=0
    for links,name in match_all:
        f_time_avg=0
        # NOTE: the unconditional break below means only ONE sample is taken
        # despite range(0,5); kept as-is to preserve existing timings.
        for i in range(0,5):
            try:
                start = timer()
                html2=requests.get(links,headers=headers2,timeout=1).content
                # A "success" response that is really an error/parked page
                # counts as a timeout.
                if 'provider nor the domain owner maintain any relationship with the advertisers.' in html2 or 'tehmovies.org has expired' in html2 or domain_s+'www.google.com/recaptcha/api/fallback?k=' in html2 or 'Access Denied' in html2 or 'not found' in html2.lower() or 'Unauthorized' in html2 or 'Forbidden' in html2 or 'Service Unavailable' in html2:
                    f_time='TIMEOUT'
                    f_time_avg='TIMEOUT'
                else:
                    end = timer()
                    f_time=float(end-start)
                    f_time_avg=f_time_avg+f_time
            except Exception as e:
                logging.warning(e)
                f_time='TIMEOUT'
                f_time_avg='TIMEOUT'
                break
            if dp.iscanceled():
                dp.close()
                return 0
            break
        if f_time_avg!='TIMEOUT':
            # NOTE: divisor 6 predates the single-sample behaviour; kept so
            # stored values stay comparable with earlier scans.
            final_time=str(f_time_avg/6)
        else:
            final_time='TIMEOUT'
        if next=='www':
            next=0
        dp.update(int(((xxx* 100.0)/(len(match_all))) ), name,final_time,'Page '+str(int(next)/50))
        xxx=xxx+1
        # FIX: query with the same cursor (dbcurserver) and the same sanitized
        # name the INSERT stores — previously this used the unrelated dbcur
        # cursor and the raw name, so the upsert check never matched (and
        # names containing apostrophes broke the SQL).
        dbcurserver.execute("SELECT * FROM servers WHERE name = '%s'"%(name.replace("'"," ")))
        match = dbcurserver.fetchone()
        if match==None:
            dbcurserver.execute("INSERT INTO servers Values ('%s', '%s');" % (name.replace("'"," "),final_time))
            dbconserver.commit()
        else:
            dbcurserver.execute("UPDATE servers SET speed='%s' WHERE name = '%s'"%(final_time,name.replace("'"," ")))
            dbconserver.commit()
    dp.close()
    regex='"discover.php\?startrow=(.+?)">Next</'
    match_next=re.compile(regex).findall(html)
    if len(match_next)>0:
        scan_direct_links(match_next[0])
def remove_from_trace(name,original_title,id,season,episode):
    """Ask for confirmation, then delete an entry from the tracker tables.

    id == '0' removes the show from the Series Tracker (Lastepisode table);
    any other id removes one watched row from AllData.  Refreshes the Kodi
    container either way.
    """
    tracker_entry = (id == '0')
    if tracker_entry:
        confirmed = xbmcgui.Dialog().yesno(("Remove from Series Tracker"),(' from Series Tracker?'+name+"Remove "))
    else:
        confirmed = xbmcgui.Dialog().yesno(("Remove Watched"),(' from Watched?'+name+"Remove "))
    if confirmed:
        if tracker_entry:
            dbcur.execute("DELETE FROM Lastepisode WHERE original_title = '%s'"%(original_title))
            dbcon.commit()
        else:
            # Blank season/episode are stored as the '%20' placeholder.
            if not episode:
                episode = '%20'
            if not season:
                season = '%20'
            episode = episode.replace(" ","%20")
            season = season.replace(" ","%20")
            dbcur.execute("DELETE FROM AllData WHERE original_title = '%s' AND season='%s' AND episode = '%s'"%(original_title,season.replace(" ","%20"),episode.replace(" ","%20")))
            dbcon.commit()
    xbmc.executebuiltin('Container.Refresh')
def play_level_movies(url):
    """Resolve a YouTube url and hand the stream to Kodi as the resolved item."""
    from youtube_ext import get_youtube_link2
    stream_url = get_youtube_link2(url).replace(' ','%20')
    resolved_item = xbmcgui.ListItem(path=stream_url)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, resolved_item)
def fast_play(final_link):
    """Mark *final_link* as the resolved playable url for the current handle."""
    play_item = xbmcgui.ListItem('FP', path=final_link)
    play_item.setInfo(type='Video', infoLabels={'title':'FP'})
    play_item.setProperty('IsPlayable', 'true')
    xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=play_item)
def get_jen_cat():
    """Add directory entries for each enabled Jen category (settings 1-6).

    Per-category icon/fanart settings are used when set, otherwise fixed
    fallback images.
    """
    fallback_icon = 'https://i.pinimg.com/736x/f2/4a/95/f24a95322599dd911e293779cd34a00a--purple-wallpaper-wallpaper-space.jpg'
    fallback_fanart = 'https://koditips.com/wp-content/uploads/jen-kodi.jpg'
    for cat_index in range(1, 7):
        suffix = str(cat_index)
        icon = Addon.getSetting("jen_icon_cat-" + suffix) or fallback_icon
        fan = Addon.getSetting("jen_fan_cat-" + suffix) or fallback_fanart
        if Addon.getSetting("jen_cat-" + suffix) == 'true':
            addDir3(Addon.getSetting("jen_name_cat-" + suffix), suffix, 43, icon, fan, Addon.getSetting("jen_name_cat-" + suffix))
def fix_name_origin(saved_name,original_title):
    """Extract a display title from a saved "[...] Title - ..." string.

    Falls back to *original_title* whenever the candidate does not contain
    the cleaned original title (ignoring case, separators and punctuation).
    """
    if saved_name is None:
        saved_name = ''
    # Normalised form of the original title used for the containment checks.
    wanted = clean_name(original_title,1).replace(' ','').replace(':','').replace("'",'').lower()
    def _squash(text):
        # Strip separators/punctuation so containment is format-insensitive.
        return text.replace("'",'').replace('.',' ').replace('_',' ').replace('-',' ').replace(':','').replace(' ','').lower()
    bracket_hits = re.compile('] (.+?) -').findall(saved_name)
    if bracket_hits:
        fixed_name = bracket_hits[0]
        if wanted not in _squash(fixed_name):
            fixed_name = original_title
    else:
        fixed_name = saved_name
    # The original performed this containment check a second time after the
    # branch above; kept for identical behaviour.
    if wanted not in _squash(fixed_name):
        fixed_name = original_title
    return fixed_name
def resolve_magnet(url,listitem,AWSHandler,info,mag_start_time):
    """Play a magnet link through the bundled kodipopcorntime torrent player.

    Returns '' immediately when the 'allow_free' setting is disabled,
    otherwise whatever TorrentPlayer.playTorrentFile returns.
    """
    if Addon.getSetting('allow_free')=='false':
        return ''
    from kodipopcorntime.torrent import TorrentPlayer
    from kodipopcorntime import settings
    mediaSettings = getattr(settings, 'movies')
    # NOTE(review): hard-coded placeholder metadata ("Incredibles 2") — the
    # player appears to only need a well-formed item dict; confirm the real
    # title/artwork is supplied elsewhere.
    item={'info': {'rating': 0.0, 'plotoutline': 'Elastigirl springs into action to save the day, while Mr. Incredible faces his greatest challenge yet \xe2\x80\x93 taking care of the problems of his three children.', 'code': 'tt3606756', 'director': '', 'studio': '', 'year': 2018, 'genre': 'animation / family / action / adventure / superhero', 'plot': 'Elastigirl springs into action to save the day, while Mr. Incredible faces his greatest challenge yet \xe2\x80\x93 taking care of the problems of his three children.', 'votes': 0.0, 'castandrole': [], 'title': 'Playing', 'tagline': '1080p: 18930 seeds; 720p: 14301 seeds; ', 'writer': '', 'originaltitle': 'Incredibles 2'}, 'thumbnail': 'http://image.tmdb.org/t/p/w500/x1txcDXkcM65gl7w20PwYSxAYah.jpg', 'stream_info': {'subtitle': {'language': ''}, 'audio': {'channels': 2, 'codec': 'aac', 'language': 'en'}, 'video': {'width': 1920, 'codec': 'h264', 'height': 720}}, 'label': 'Incredibles 2', 'properties': {'fanart_image': 'http://image.tmdb.org/t/p/w500/mabuNsGJgRuCTuGqjFkWe1xdu19.jpg'}, 'icon': 'http://image.tmdb.org/t/p/w500/x1txcDXkcM65gl7w20PwYSxAYah.jpg'}
    return TorrentPlayer().playTorrentFile(mediaSettings, url, item,AWSHandler,info,mag_start_time, None,listitem)
def get_torrent_link(url):
    """Translate a magnet/torrent url into a plugin:// url for the configured
    torrent player (setting 'players_new'), or an HTTP stream for Acestream.

    For Acestream the local engine (port 6878) is started on the infohash and
    a progress dialog polls its stat endpoint until ~1MB is buffered and the
    engine reports 'dl' status; on engine failure the user is asked to pick a
    different player.
    """
    from urllib import quote_plus
    plugin_p = Addon.getSetting('players_new')
    infohash=re.compile('btih:(.+?)&').findall(url)
    # Map the numeric setting to a player name; '9' prompts the user.
    if plugin_p=='0':
        plugin = 'Quasar'
    elif plugin_p=='1':
        plugin = 'Pulsar'
    elif plugin_p=='2':
        plugin = 'KmediaTorrent'
    elif plugin_p=='3':
        plugin = 'Torrenter'
    elif plugin_p=='4':
        plugin = 'YATP'
    elif plugin_p=='5':
        plugin = 'XBMCtorrent'
    elif plugin_p=='6':
        plugin = 'KODIPOPCORN'
    elif plugin_p=='7':
        plugin = 'Destiny of Deathstar'
    elif plugin_p=='8':
        plugin = 'Acestream'
    elif plugin_p=='9':
        list_players = ['Quasar', 'Pulsar', 'KmediaTorrent', 'Torrenter', 'YATP', 'XBMCtorrent','KODIPOPCORN','Destiny of Deathstar','Acestream']
        selection = xbmcgui.Dialog().select("Torrent Player", list_players)
        if selection == -1:
            return
        plugin = list_players[selection]
    filename = (url)
    uri_string = quote_plus(filename)
    if plugin == "Acestream":
        link = 'http://127.0.0.1:6878/ace/getstream?infohash=%s&hlc=1&transcode=0&transcode_mp3=0&transcode_ac3=0&preferred_audio_language=eng'%infohash[0]
        # The manifest endpoint (json) supersedes the getstream url above.
        link='http://127.0.0.1:6878/ace/manifest.m3u8?infohash=%s&format=json&use_api_events=1&use_stop_notifications=1'%infohash[0]
        try:
            req_pre=requests.get(link).json()
        except:
            xbmcgui.Dialog().ok('Acestream Error','Opps ACESTREAM wasnt activated, Go a head and activate it...')
            return ''
        size=0
        f_size=0
        speed=0
        peers=0
        unit='b'
        status=''
        start_time=time.time()
        dp = xbmcgui.DialogProgress()
        dp.create("Starting", "Please Wait", '')
        xbmc.sleep(100)
        check=True
        # Engine refused the infohash: fall back to another player choice.
        if req_pre['response']==None:
            xbmc.executebuiltin((u'Notification(%s,%s)' % ('Acestream Error', 'Acestream Failed'.encode('utf-8'))))
            list_players = ['Quasar', 'Pulsar', 'KmediaTorrent', 'Torrenter', 'YATP', 'XBMCtorrent','KODIPOPCORN','Destiny of Deathstar']
            selection = xbmcgui.Dialog().select("Torrent Player", list_players)
            if selection == -1:
                return
            plugin = list_players[selection]
            check=False
        if check:
            if 'stat_url' in req_pre['response']:
                stat_link=req_pre['response']['stat_url']
            else:
                xbmc.sleep(300)
                stat_link=req_pre['response']['stat_url']
            req=requests.get(stat_link).json()
            # Poll until ~1MB downloaded and the engine reports 'dl' status.
            while size<(1*1024*1024) or status!='dl':
                if len(req)>0:
                    if 'downloaded' in req['response']:
                        size=req['response']['downloaded']
                        # Human-readable size for the progress dialog.
                        if size>1024:
                            f_size=size/1024
                            unit='Kb'
                        if size>(1024*1024):
                            f_size=size/(1024*1024)
                            unit='Mb'
                        if size>(1024*1024*1024):
                            f_size=size/(1024*1024*1024)
                            unit='Gb'
                    if 'peers' in req['response']:
                        peers=req['response']['peers']
                    if 'speed_down' in req['response']:
                        speed=req['response']['speed_down']
                    if 'status' in req['response']:
                        status=req['response']['status']
                elapsed_time = time.time() - start_time
                dp.update(0, ' Please Wait '+status+' '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'P-%s %sKb/s'%(str(peers),str(speed)), 'Size '+str(f_size)+unit)
                xbmc.sleep(1000)
                req=requests.get(stat_link).json()
                if dp.iscanceled():
                    dp.close()
                    break
            dp.close()
            link=req_pre['response']['playback_url']
    #link = 'http://127.0.0.1:6878/ace/getstream?infohash=%s&hlc=1&transcode=0&transcode_mp3=0&transcode_ac3=0&preferred_audio_language=eng'%infohash[0]
    # NOTE(review): when plugin stayed "Acestream", this chain falls through
    # to the final else and overwrites the playback_url with the raw magnet
    # url — looks unintended; confirm before changing.
    if plugin == 'Quasar':
        link = 'plugin://plugin.video.quasar/play?uri=%s' % uri_string
    elif plugin == 'Pulsar':
        link = 'plugin://plugin.video.pulsar/play?uri=%s' % uri_string
    elif plugin == 'KmediaTorrent':
        link = 'plugin://plugin.video.kmediatorrent/play/%s' % uri_string
    elif plugin == "Torrenter":
        link = 'plugin://plugin.video.torrenter/?action=playSTRM&url=' + uri_string
    elif plugin == "YATP":
        link = 'plugin://plugin.video.yatp/?action=play&torrent=' + uri_string
    elif plugin == "KODIPOPCORN":
        link='plugin://plugin.video.kodipopcorntime/?endpoint=player&720psize=1331439862&1080psize=2566242959&720p='+uri_string+'&mediaType=movies'
    elif plugin == "XBMCtorrent":
        link = 'plugin://plugin.video.xbmctorrent/play/%s' % uri_string
    else:
        link=url
    return link
def get_nan(url):
    """Resolve a nana10 documentary page url to its direct stream url.

    Scrapes the page for the player iframe, extracts the user/video ids and
    asks the cloudvideoplatform API for the final media link.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0',
        'Accept': '*/*',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    page = requests.get(url, headers=request_headers).content
    iframe_path = re.compile('"VideoIframe".+?src="(.+?)"').findall(page)[0]
    player_page = requests.get('http://docu.nana10.co.il/' + iframe_path, headers=request_headers).content
    user_id = re.compile('UserID=(.+?);').findall(player_page)[0]
    video_id = re.compile('"MediaStockVideoItemGroupID","(.+?)"').findall(player_page)[0]
    api_url = 'http://vod.ch10.cloudvideoplatform.com/api/getlink/getflash?userid=%s&showid=%s' % (user_id, video_id)
    entry = requests.get(api_url, headers=request_headers).json()[0]
    # Media file name gets the bitrate suffix spliced in before the extension.
    media_name = entry['MediaFile'].split('.')[0] + entry['Bitrates'] + '.' + entry['MediaFile'].split('.')[1]
    return entry['ProtocolType'] + entry['ServerAddress'] + entry['MediaRoot'] + media_name + entry['StreamingType'] + entry['Params']
def get_nex_ep( time_to_save, original_title,year,season,episode,id,eng_name,show_original_year,heb_name,isr,st,fav,prev_name,url,iconimage,fanart,o_plot):
    """Pre-fetch (and cache) the sources for the next episode.

    Guarded by the global ``in_next_ep`` flag so only one pre-fetch runs at a
    time — returns 'ok' immediately when another pre-fetch is in flight.
    The c_get_sources call is made purely for its caching side effect
    (table='pages'); its return values are not used here.  Errors are
    reported via a Kodi notification and logged with the failing line.
    (Removed: the unused ``da`` list and ``stop_window`` flag.)
    """
    global in_next_ep
    if in_next_ep==1:
        return 'ok'
    in_next_ep=1
    try:
        # Result tuple kept for documentation of the cache.get() shape.
        match_a,a,b,f_subs= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,st,fav,'no','0',table='pages')
        logging.warning('DONE NEXT EP SEARCHING')
    except Exception as e:
        # Report the exact failing line for debugging.
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Error', str(e))).encode('utf-8'))
        logging.warning('ERROR IN NEXTUP:'+str(lineno))
        logging.warning('inline:'+line)
        logging.warning(e)
        logging.warning('BAD NEXT EP SEARCHING')
    in_next_ep=0
def get_uptobox(url):
    """Resolve an uptobox/uptostream page to a direct video url.

    Follows the uptobox page to its uptostream player when needed, parses
    the player's ``sources`` JSON, and returns the first source when running
    locally or in tv_mode, otherwise the highest-quality source.

    Removed: a quality-selection dialog that followed the if/else below —
    both branches return, so it was unreachable dead code.
    """
    global tv_mode
    if 'uptostream' not in url:
        # uptobox page: hop to the embedded uptostream player first.
        x=requests.get(url).content
        regex='<a href="https://uptostream.com/(.+?)"'
        match=re.compile(regex).findall(x)
        url=domain_s+'uptostream.com/'+match[0]
    cookies = {
        #'__cfduid': 'd0dfe3eedd616e0f275edcea08cdb6e521520582950',
        'video': '55srlypu0c08',
    }
    headers = {
        'Host': 'uptostream.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Referer': url,
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    response = requests.get(url, headers=headers, cookies=cookies,timeout=10).content
    regex='var sources = (.+?);'
    match=re.compile(regex).findall(response)
    if len(match)==0:
        # Newer player variant embeds the sources as a JSON.parse argument.
        regex="sources = JSON.parse\('(.+?)'"
        match=re.compile(regex).findall(response)
    links=json.loads(match[0])
    quality=[]
    links2=[]
    for data in links:
        # label is e.g. '720p' — strip the 'p' to compare numerically.
        quality.append(int(data['label'].replace('p','')))
        links2.append(data['src'])
    if local==True or tv_mode:
        return links2[0]
    else:
        return links2[quality.index(max(quality))]
def nba_solve(url):
    """Resolve an fembed-style '/v/' player url to its best direct file url.

    Posts to the site's '/api/source/' endpoint and returns the last (highest
    quality) entry of the returned 'data' list.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0',
        'Accept': '*/*',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': url,
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    host_name = re.compile('//(.+?)/').findall(url)[0]
    payload = {'r': '', 'd': host_name}
    api_endpoint = url.replace('/v/','/api/source/')
    reply = requests.post(api_endpoint, headers=request_headers, data=payload).json()
    return reply['data'][-1]['file']
def regex_from_to(text, from_string, to_string, excluding=True):
    """Return the first case-insensitive span of *text* between two regex
    delimiters.

    With excluding=True only the text between the delimiters is returned;
    otherwise the delimiters are included.  Returns '' when nothing matches
    (or the delimiters form an invalid pattern).
    """
    if excluding:
        pattern = "(?i)" + from_string + "([\S\s]+?)" + to_string
    else:
        pattern = "(?i)(" + from_string + "[\S\s]+?" + to_string + ")"
    try:
        result = re.search(pattern, text).group(1)
    except:
        result = ''
    return result
def regex_get_all(text, start_with, end_with):
    """Return every case-insensitive, non-greedy span of *text* that starts
    with *start_with* and ends with *end_with* (delimiters included)."""
    pattern = "(?i)(" + start_with + "[\S\s]+?" + end_with + ")"
    return re.findall(pattern, text)
def sort_function(item):
    """Transform a scraper link's quality into a sortable string.

    Args:
        item: scraper link tuple; ``item[1][0]`` is the link dict carrying
            'quality' (or a nested 'path' dict with 'quality'), and optionally
            a 'debridonly' marker.

    Returns:
        str: sort key — debrid-only links map to 'Aa'..'Bc' (fallback 'CZ'),
        free links to 'HDa'..'SDc' (fallback 'Z'), so debrid and higher
        qualities sort first.

    Fix: ``q`` previously stayed unbound when 'path' was present without
    'debridonly' (UnboundLocalError); it now defaults to "B".
    """
    link = item[1][0]
    if "quality" in link:
        quality = link["quality"]
    else:
        quality = link["path"]["quality"]
    # Classify debrid-only links into the premium ("A") bucket.
    q = "B"
    if "path" in link:
        if "debridonly" in link["path"]:
            q = "A"
    elif "debridonly" in link:
        q = "A"
    # (pattern, match-by-prefix?, suffix letter, tier) — order matters.
    rules = (
        ("4K", True, "a", "hi"),
        ("1080", True, "b", "hi"),
        ("720", True, "c", "hi"),
        ("560", True, "d", "hi"),
        ("DVD", False, "e", "hi"),
        ("HD", False, "f", "hi"),
        ("480", True, "a", "lo"),
        ("360", True, "b", "lo"),
        ("SD", True, "c", "lo"),
    )
    hi_tag = "A" if q == "A" else "HD"
    lo_tag = "B" if q == "A" else "SD"
    for pattern, by_prefix, suffix, tier in rules:
        hit = quality.startswith(pattern) if by_prefix else quality == pattern
        if hit:
            return (hi_tag if tier == "hi" else lo_tag) + suffix
    return "CZ" if q == "A" else "Z"
def GetSublinks(name,url,iconimage,fanart,title,year,imdb):
    """Pick a playable url out of a 'sublink:...#' packed url string.

    Parses every 'sublink:...#' segment (optionally carrying LISTSOURCE/
    LISTNAME metadata).  With exactly one candidate it is returned directly;
    with none, universalscrapers is asked to find a source for title/year/
    imdb; with several, the user picks one from a dialog.

    Returns the chosen url string, or False when scraping yields nothing.
    """
    List=[]; ListU=[]; c=0
    all_videos = regex_get_all(url, 'sublink:', '#')
    for a in all_videos:
        if 'LISTSOURCE:' in a:
            vurl = regex_from_to(a, 'LISTSOURCE:', '::')
            linename = regex_from_to(a, 'LISTNAME:', '::')
        else:
            vurl = a.replace('sublink:','').replace('#','')
            linename = name
        # Ignore fragments too short to be a real url.
        if len(vurl) > 10:
            c=c+1; List.append(linename); ListU.append(vurl)
    if c==1:
        return ListU[0]
        # NOTE(review): everything below this return is unreachable dead code.
        #print 'play 1 Name:' + name + ' url:' + ListU[0] + ' ' + str(c)
        liz=xbmcgui.ListItem(name, iconImage=iconimage,thumbnailImage=iconimage); liz.setInfo( type="Video", infoLabels={ "Title": name } )
        ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=ListU[0],listitem=liz)
        xbmc.Player().play(urlsolver(ListU[0]), liz)
    else:
        if len(List)==0:
            # No packed links: fall back to scraping by title/year/imdb.
            import universalscrapers
            title=title.replace('.',' ')
            scraper = universalscrapers.scrape_movie_with_dialog
            link, rest = scraper(
                title,
                year,
                imdb,
                timeout=30,
                exclude=None,
                extended=True,
                sort_function=sort_function,
                enable_debrid=allow_debrid,
            )
            if type(link) == dict and "path" in link:
                link = link["path"]
            if link is None:
                return False
            url = link["url"]
            # Scrapers may return scheme-less urls.
            if 'http' not in url and 'magnet' not in url:
                url='http:'+url
            return url
        else:
            dialog=xbmcgui.Dialog()
            rNo=dialog.select('[COLORorange]Select A Source[/COLOR]', List)
            if rNo>=0:
                rName=name
                rURL=str(ListU[rNo])
                return rURL
                # NOTE(review): unreachable dead code after the return above.
                #print 'Sublinks Name:' + name + ' url:' + rURL
                try:
                    xbmc.Player().play(urlsolver(rURL), xbmcgui.ListItem(rName))
                except:
                    xbmc.Player().play(rURL, xbmcgui.ListItem(rName))
def check_pre(saved_name,all_subs,original_title):
    """Score how well each subtitle release name matches the played file name.

    Both names are normalised (separators -> '.', container extensions and
    the title itself stripped) and tokenised; release-type tokens shared by
    both names ('bluray', 'hdtv', ...) are triple-weighted before calling
    similar() so a matching rip type dominates the score.

    Args:
        saved_name: file name of the stream that was played.
        all_subs: list of subtitle dicts with a 'MovieReleaseName' key; each
            gets a 'pre' similarity score added in place.
        original_title: the movie/show title to strip from both names.

    Returns:
        The scored subtitle list, 0 when the stream name reduces to nothing,
        or None when an unexpected error occurs (logged).
    (Removed: the dead ``highest`` best-score tracking, which was never read.)
    """
    try:
        release_names=['bluray','hdtv','dvdrip','bdrip','web-dl']
        fixed_name=saved_name.lower().strip().replace("%20",".").replace("_",".").replace(" ",".").replace("-",".").replace(".avi","").replace(".mp4","").replace(".mkv","")
        original_title=original_title.lower().strip().replace("%20",".").replace("_",".").replace(" ",".").replace("-",".").replace(".avi","").replace(".mp4","").replace(".mkv","")
        fixed_name=fixed_name.decode('utf-8','ignore').encode("utf-8").replace(original_title,'')
        if fixed_name=='':
            return 0
        array_original=fixed_name.split(".")
        array_original=[line.strip().lower() for line in array_original]
        array_original=[(x) for x in array_original if x != '']
        all_subs_new=[]
        for items in all_subs:
            fixed_name=items['MovieReleaseName'].lower().strip().replace("%20",".").replace("_",".").replace(" ",".").replace("-",".").replace(".avi","").replace(".mp4","").replace(".mkv","")
            fixed_name=fixed_name.replace(original_title,'')
            array_subs=fixed_name.split(".")
            array_subs=[line.strip().lower() for line in array_subs]
            array_subs=[str(x).lower() for x in array_subs if x != '']
            # Triple-weight shared release-type tokens.
            # NOTE(review): array_original is mutated here, so the boost
            # accumulates across subtitles — confirm this is intended.
            for item_2 in release_names:
                if item_2 in array_original and item_2 in array_subs:
                    array_original.append(item_2)
                    array_original.append(item_2)
                    array_original.append(item_2)
                    array_subs.append(item_2)
                    array_subs.append(item_2)
                    array_subs.append(item_2)
            precent=similar(array_original,array_subs)
            items['pre']=precent
            all_subs_new.append(items)
        return all_subs_new
    except Exception as e:
        logging.warning('check_pre error')
        logging.warning(e)
def get_sub_server(imdb,season,episode):
    """Query OpenSubtitles (XML-RPC) for subtitles matching an IMDB id.

    season/episode of None means a plain movie lookup; otherwise a
    TV-episode search.  Preferred languages come from the addon's
    'subtitles.lang.1' / 'subtitles.lang.2' settings, mapped to ISO
    639-2 codes.  Returns the raw 'data' list from the SearchSubtitles
    response.
    """
    logging.warning('In 4')
    import xmlrpclib
    langs = []
    # Human-readable setting value -> ISO 639-2 code(s) accepted by OpenSubtitles.
    langDict = {'Afrikaans': 'afr', 'Albanian': 'alb', 'Arabic': 'ara', 'Armenian': 'arm', 'Basque': 'baq', 'Bengali': 'ben', 'Bosnian': 'bos', 'Breton': 'bre', 'Bulgarian': 'bul', 'Burmese': 'bur', 'Catalan': 'cat', 'Chinese': 'chi', 'Croatian': 'hrv', 'Czech': 'cze', 'Danish': 'dan', 'Dutch': 'dut', 'English': 'eng', 'Esperanto': 'epo', 'Estonian': 'est', 'Finnish': 'fin', 'French': 'fre', 'Galician': 'glg', 'Georgian': 'geo', 'German': 'ger', 'Greek': 'ell', 'Hebrew': 'heb', 'Hindi': 'hin', 'Hungarian': 'hun', 'Icelandic': 'ice', 'Indonesian': 'ind', 'Italian': 'ita', 'Japanese': 'jpn', 'Kazakh': 'kaz', 'Khmer': 'khm', 'Korean': 'kor', 'Latvian': 'lav', 'Lithuanian': 'lit', 'Luxembourgish': 'ltz', 'Macedonian': 'mac', 'Malay': 'may', 'Malayalam': 'mal', 'Manipuri': 'mni', 'Mongolian': 'mon', 'Montenegrin': 'mne', 'Norwegian': 'nor', 'Occitan': 'oci', 'Persian': 'per', 'Polish': 'pol', 'Portuguese': 'por,pob', 'Portuguese(Brazil)': 'pob,por', 'Romanian': 'rum', 'Russian': 'rus', 'Serbian': 'scc', 'Sinhalese': 'sin', 'Slovak': 'slo', 'Slovenian': 'slv', 'Spanish': 'spa', 'Swahili': 'swa', 'Swedish': 'swe', 'Syriac': 'syr', 'Tagalog': 'tgl', 'Tamil': 'tam', 'Telugu': 'tel', 'Thai': 'tha', 'Turkish': 'tur', 'Ukrainian': 'ukr', 'Urdu': 'urd'}
    # Collect up to two preferred languages; a mapping may hold several
    # comma-separated codes (e.g. Portuguese 'por,pob'), hence the split.
    try:
        try: langs = langDict[Addon.getSetting('subtitles.lang.1')].split(',')
        except: langs.append(langDict[Addon.getSetting('subtitles.lang.1')])
    except: pass
    try:
        try: langs = langs + langDict[Addon.getSetting('subtitles.lang.2')].split(',')
        except: langs.append(langDict[Addon.getSetting('subtitles.lang.2')])
    except: pass
    server = xmlrpclib.Server('http://api.opensubtitles.org/xml-rpc', verbose=0)
    logging.warning('4')
    # Anonymous login; the returned token authorises the search call below.
    token = server.LogIn('', '', 'en', 'XBMC_Subtitles_v1')['token']
    # OpenSubtitles expects the bare numeric part of the IMDB id.
    sublanguageid = ','.join(langs) ; imdbid = re.sub('[^0-9]', '', imdb)
    logging.warning('5')
    if not (season == None or episode == None):
        result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid, 'season': season, 'episode': episode}])
        logging.warning(result)
        result=result['data']
    else:
        result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid}])['data']
    logging.warning('In 5')
    return result
def get_sub_result(imdb,season,episode,name,saved_name):
    """Fetch OpenSubtitles search results for a title, cached for 24 hours.

    Returns a (result, f_list) pair; both names refer to the same raw
    result list from the subtitle server.
    """
    logging.warning('In 1')
    search_key = (imdb, season, episode)
    logging.warning('Subtitles Search result')
    logging.warning([search_key])
    # '%20' is the addon's placeholder for "not set" -> movie lookup.
    season = None if season == '%20' else season
    episode = None if episode == '%20' else episode
    result = cache.get(get_sub_server, 24, imdb, season, episode, table='pages')
    logging.warning('In 2')
    f_list = result
    logging.warning('In 4')
    return result, f_list
def getsubs( name, imdb, season, episode,saved_name):
    """Fetch, rank and present OpenSubtitles results for the playing item.

    Pulls (cached) search results, scores each candidate against
    `saved_name` via check_pre(), sorts best-match first and shows the
    MySubs selection window.  Returns 'ok' immediately when subtitles
    are disabled in the addon settings.
    """
    global done1
    if not Addon.getSetting('subtitles') == 'true': return 'ok'
    logging.warning('1')
    # Code pages for languages whose .srt files are commonly non-UTF-8
    # (used by the disabled download path kept below for reference).
    codePageDict = {'ara': 'cp1256', 'ar': 'cp1256', 'ell': 'cp1253', 'el': 'cp1253', 'heb': 'cp1255', 'he': 'cp1255', 'tur': 'cp1254', 'tr': 'cp1254', 'rus': 'cp1251', 'ru': 'cp1251'}
    quality = ['bluray', 'hdrip', 'brrip', 'bdrip', 'dvdrip', 'webrip', 'hdtv']
    logging.warning('2')
    logging.warning('3')
    '''
    try: subLang = xbmc.Player().getSubtitles()
    except: subLang = ''
    if subLang == langs[0]: raise Exception()
    '''
    # '%20' is the addon's placeholder for "not set" (movie, not episode).
    if season=='%20':
        season=None
    if episode=='%20':
        episode=None
    #result,f_list=get_sub_result(imdb,season,episode,name,saved_name)
    result,f_list=cache.get(get_sub_result,24,imdb,season,episode,name,saved_name, table='pages')
    logging.warning('check_pre')
    # Score every candidate against the local release name; check_pre
    # returns 0 when it has nothing usable to compare.
    result=check_pre(saved_name,result,name)
    fixed_list=[]
    logging.warning('4')
    if result==0:
        # No scores available - fall back to the raw list, 0 score each.
        for items in f_list:
            fixed_list.append((0,items['MovieReleaseName'],items['IDSubtitleFile'],items['SubLanguageID']))
    else:
        for items in result:
            fixed_list.append((items['pre'],items['MovieReleaseName'],items['IDSubtitleFile'],items['SubLanguageID']))
    # Highest similarity score first.
    fixed_list=sorted(fixed_list, key=lambda x: x[0], reverse=True)
    logging.warning('5')
    if len(fixed_list)==0:
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'No Available Subs')))
    else:
        logging.warning('Show Window')
        window = MySubs('Subtitles - '+name ,fixed_list,f_list)
        window.doModal()
        del window
        xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
    done1=2
    '''
    filter = []
    result = [i for i in result if i['SubSumCD'] == '1']
    for lang in langs:
        filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in fmt)]
        filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in quality)]
        filter += [i for i in result if i['SubLanguageID'] == lang]
    try: lang = xbmc.convertLanguage(filter[0]['SubLanguageID'], xbmc.ISO_639_1)
    except: lang = filter[0]['SubLanguageID']
    content = [filter[0]['IDSubtitleFile'],]
    content = server.DownloadSubtitles(token, content)
    content = base64.b64decode(content['data'][0]['data'])
    content = gzip.GzipFile(fileobj=StringIO.StringIO(content)).read()
    subtitle = xbmc.translatePath('special://temp/')
    subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang)
    logging.warning(subtitle)
    codepage = codePageDict.get(lang, '')
    if codepage and control.setting('subtitles.utf') == 'true':
        try:
            content_encoded = codecs.decode(content, codepage)
            content = codecs.encode(content_encoded, 'utf-8')
        except:
            pass
    file = control.openFile(subtitle, 'w')
    file.write(str(content))
    file.close()
    xbmc.sleep(1000)
    #xbmc.Player().setSubtitles(subtitle)
    '''
def start_subs(name, imdb, season, episode,saved_name):
    """Wait until playback has actually started, then launch subtitle search.

    Guarded by the module-global `wait_for_subs` so only one waiter runs
    at a time.  Polls the Kodi player until the play position passes a
    threshold, or gives up after the retry budget (600 polls) is spent,
    then calls getsubs() and restores fullscreen.
    """
    global wait_for_subs,done1
    logging.warning('wait_for_subs:'+str(wait_for_subs))
    if wait_for_subs==1:
        return 'ok'
    wait_for_subs=1
    exit_counter=0
    get_sub_now=0
    play_time=1
    # In player mode 3, wait until the configured pre-play time has passed.
    if Addon.getSetting("new_window_type2")=='3':
        play_time=int(Addon.getSetting("play_full_time"))+1
    # done1_1==3 appears to signal "start immediately" - TODO confirm.
    if done1_1==3:
        play_time=1
    while(1):
        if done1_1==3:
            play_time=1
        if xbmc.Player().isPlaying():
            xbmc.sleep(1000)
            vidtime = xbmc.Player().getTime()
            if vidtime > play_time :
                logging.warning('Vidtime OK:'+str(vidtime))
                get_sub_now=1
                break
        # Bail out if playback never reaches the threshold.
        if exit_counter>600:
            break
        exit_counter+=1
        xbmc.sleep(100)
    wait_for_subs=0
    logging.warning('Vidtime OK:'+str(get_sub_now))
    if get_sub_now>0:
        #getsubs( 'Rampage', 'tt2231461', None, None,'Rampage.2018.720p.BluRay.x264-SPARKS')
        # '%20' placeholders mean "movie" (no season/episode).
        if season=='%20':
            season=None
        if episode=='%20':
            episode=None
        #xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
        getsubs( name, imdb, season, episode,saved_name)
        xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
    return 'OK'
def decode_watch(x):
    """Decode watchcartoononline's obfuscated embed <script>.

    `x` is a requests Response whose HTML contains a script holding an
    array of base64 strings plus an 'atob(...) - N' expression.  Each
    array element decodes to digits; subtracting N yields one character
    code of a hidden <iframe> tag.  Returns that iframe's src value.
    """
    # Pull the obfuscated array body out of the first matching <script>.
    regex='<script>var.+?\[(.+?)</script'
    m1=re.compile(regex,re.DOTALL).findall(x.content)[0]
    # Every quoted element is a base64-encoded chunk of digits.
    regex='"(.+?)"'
    m2=re.compile(regex,re.DOTALL).findall(m1)
    f_str=''
    # Numeric offset subtracted from each decoded value, taken from the
    # 'atob(...) - N)' expression in the same script.
    reg='atob.+? - (.+?)\)'
    ma=re.compile(reg).findall(m1)[0]
    for items in m2:
        # Python 2 only: str.decode('base64').
        a=items.decode('base64')
        b=re.sub("\D", "", a)
        # Each chunk contributes exactly one character: chr(digits - offset).
        f_str=f_str+chr(int(b)-int(ma))
    # The reassembled string is an <iframe src="..."> tag.
    regex='src="(.+?)"'
    m3=re.compile(regex).findall(f_str)[0]
    return m3
def reolve_watchcartoononline(url):
    """Resolve a watchcartoononline.io page into a direct playable URL.

    Flow: fetch the page, decode the obfuscated embed path via
    decode_watch(), fetch the embed page to discover its AJAX endpoint,
    call that endpoint for the CDN link, then follow a single redirect.
    Returns the final URL with urlencoded headers appended after '|'
    (the Kodi URL|headers convention).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:66.0) Gecko/20100101 Firefox/66.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        #'Referer': 'https://www.watchcartoononline.io/101-dalmatians-ii-patchs-london-adventure',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    x = requests.get(url,headers=headers)
    # Path of the hidden iframe extracted from the obfuscated script.
    code=decode_watch(x)
    logging.warning('code:'+code)
    # Carry the session cookies forward; the site checks them.
    cookies={}
    for key, morsel in x.cookies.items():
        cookies[key] = morsel
    headers['Referer']=url
    cookies[ 'bbl']= '2'
    cookies[ 'BB_plg']= 'pm'
    response = requests.get('https://www.watchcartoononline.io'+code, headers=headers,cookies=cookies)
    logging.warning(response.cookies)
    # The embed page issues a $.get("...") to fetch the CDN descriptor.
    regex='get\("(.+?)"'
    m=re.compile(regex).findall(response.content)[0]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0',
        'Accept': '*/*',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': 'https://www.watchcartoononline.io'+code,
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    response = requests.get('https://www.watchcartoononline.io'+m, headers=headers,cookies=cookies).json()
    # JSON gives the CDN host plus an encoded video id.
    f_link=response['cdn']+'/getvid?evid='+response['enc']
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    # Follow (at most) one redirect via a HEAD request to get the real file URL.
    ff_link_head=requests.head(f_link,stream=True).headers
    if 'Location' in ff_link_head:
        ff_link=ff_link_head['Location']
    else:
        ff_link=f_link
    #ff_link=f_link#requests.get(f_link,headers=headers,stream=True).url
    # Kodi convention: append the request headers after '|'.
    head=urllib.urlencode(headers)
    ff_link=ff_link+"|"+head
    #ok=xbmc.Player().play(ff_link)
    return ff_link
def resolve_direct(url,original_title):
    """Turn a JSON-encoded torrent list into a magnet link.

    `url` is a JSON array of rows shaped like
    [?, quality, size, seeds, peers, info_hash].  A single row resolves
    immediately; otherwise the user picks one from a selection dialog.
    Returns the magnet URI, or 'bad' when the dialog is cancelled.
    """
    # Magnet template with the addon's fixed public tracker list.
    magnet_tpl = ('magnet:?xt=urn:btih:{0}&dn={1}'
                  '&tr=udp%3A%2F%2Fglotorrents.pw%3A6969%2Fannounce'
                  '&tr=udp%3A%2F%2Ftracker.openbittorrent.com%3A80'
                  '&tr=udp%3A%2F%2Ftracker.coppersurfer.tk%3A6969'
                  '&tr=udp%3A%2F%2Fp4p.arenabg.ch%3A1337'
                  '&tr=udp%3A%2F%2Ftracker.internetwarriors.net%3A1337')
    rows = json.loads(url)
    quoted_title = urllib.quote_plus(original_title)
    if len(rows) == 1:
        return magnet_tpl.format(rows[0][5], quoted_title)
    labels = ['[COLOR gold]' + str(row[1]) + '[/COLOR] | Size: ' + str(row[2]) +
              ' | S:' + str(row[3]) + '/P:' + str(row[4]) for row in rows]
    links = [magnet_tpl.format(row[5], quoted_title) for row in rows]
    choice = xbmcgui.Dialog().select("Choose", labels)
    if choice == -1:
        return 'bad'
    return links[choice]
def resolve_mvmax(url,name,year):
    """Resolve a moviesmax.net page into a direct mp4 URL plus a TMDB id.

    Looks the title up on TMDB (Hebrew metadata) for an id, then follows
    the site's container link to the iframe player and extracts its
    `var mp4 = '...'` source.  Returns (mp4_url, tmdb_id_as_string);
    the id is '' when the TMDB search finds nothing.
    """
    # Include the year in the TMDB query only when one was supplied.
    if len(year)>2:
        url2='http://api.themoviedb.org/3/search/movie?api_key=1248868d7003f60f2386595db98455ef&query=%s&year=%s&language=he&append_to_response=origin_country&page=1'%(name,year)
    else:
        url2='http://api.themoviedb.org/3/search/movie?api_key=1248868d7003f60f2386595db98455ef&query=%s&language=he&append_to_response=origin_country&page=1'%(name)
    logging.warning(url2)
    y=requests.get(url2).json()
    try:
        id=y['results'][0]['id']
    except:
        id=''
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    x=requests.get(url,headers=headers).content
    # First link inside the page's container block leads to the watch page.
    regex='<div class="container">.+?a href="(.+?)"'
    m=re.compile(regex,re.DOTALL).findall(x)[0]
    x=requests.get(m,headers=headers).content
    # The watch page hosts the player in an iframe.
    regex='iframe src="(.+?)"'
    m2=re.compile(regex,re.DOTALL).findall(x)[0]
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': m,
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
        'TE': 'Trailers',
    }
    x = requests.get(m2, headers=headers).content
    # The iframe's script declares the direct stream as: var mp4 = '...'
    regex="var mp4 = '(.+?)'"
    m=re.compile(regex,re.DOTALL).findall(x)[0]
    return m,str(id)
def play(name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id,auto_play=False,windows_play=False,auto_fast=False,nextup=False,f_auto_play=False):
logging.warning(url)
if 'moviesmax.net' in url:
o_url=url
url,id=resolve_mvmax(url,name,data)
dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' and type='%s'"%(name.replace("'"," "),'movie'))
match = dbcur.fetchone()
logging.warning('hislink')
if match==None:
dbcur.execute("INSERT INTO AllData Values ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (name.replace("'"," "),o_url,iconimage,fanart,description.replace("'"," "),data,original_title.replace("'"," "),season,episode,id,eng_name.replace("'"," "),show_original_year,'Direct link',isr,'movie'))
dbcon.commit()
if 0:#'tt' in id:
try:
url22='https://api.themoviedb.org/3/find/%s?api_key=b7cd3340a794e5a2f35e3abb820b497f&language=en&external_source=imdb_id'%id
x=requests.get(url22).json()
if 'movie_results' in x:
id=str(x['movie_results'][0]['id'])
else:
id=str(x['tv_results'][0]['id'])
except:
pass
if 'DIRECT link' in url:
url=resolve_direct(url,original_title)
if url=='bad':
return ''
dbcur.execute("SELECT * FROM AllData WHERE original_title = '%s' and type='%s'"%(name.replace("'"," "),'movie'))
match = dbcur.fetchone()
logging.warning('hislink')
if match==None:
dbcur.execute("INSERT INTO AllData Values ('%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (name.replace("'"," "),url,iconimage,fanart,description.replace("'"," "),data,original_title.replace("'"," "),season,episode,id,eng_name.replace("'"," "),show_original_year,'Direct link',isr,'movie'))
dbcon.commit()
name=name.replace('Cached ','')
if 'watchcartoononline' in url:
url=reolve_watchcartoononline(url)
try:
global silent_mode,list_index,playing_text,mag_start_time_new
regex='sss (.+?) sss'
if 'sublink' in url:
logging.warning('solving sublink')
url=GetSublinks(name,url,iconimage,fanart,clean_name(original_title,1),data,id)
if not url:
return '0'
if name==None:
name=original_title
if 'nbareplayhd.com' in url or 'nflhdreplay' in url:
url=nba_solve(url)
match=re.compile(regex).findall(description)
if len(match)>0:
impmodule = __import__(match[0].replace('.py',''))
s_type=[]
pre_url=url
try:
dbcur.execute("INSERT INTO historylinks Values ('%s','GOOD','')"%pre_url.encode('base64'))
dbcon.commit()
except Exception as e:
pass
try:
base=impmodule.source()
except:
try:
base=impmodule.sources()
except:
s_type='universal'
if 'pageURL' in url:
url=json.loads(url)
try:
url=base.resolve(url)
except:
pass
#if 'uptobox' in url:
# url=get_uptobox(url)
url=url.replace('https://www.rapidvideo.com/e/','https://www.rapidvideo.com/v/')
url=url.replace('oload.download','openload.co')
start_time=time.time()
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp = xbmcgui.DialogProgress()
dp.create("Start Playing", "Please Wait", '')
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Start Playing', '')
elapsed_time = time.time() - start_time
playing_text='Start Playing$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
o_name=name
url=url.replace("'",'%27')
if 'youtube' in url and 'embed/' in url:
url=url.replace('embed/','watch?v=')
subtitlesUrl = None
wall_link=False
if 'kanapi.' in url:
url=get_kan(url)
if 'Redirector' in url:
url=requests.get(url,stream=True).url
o_plot=description
add_ace=''
if url=='aceplay' or '/ace/getstream' in url:
url=cache.get(refresh_ace,24,name, table='cookies')
add_ace='__aceplay__'
if Addon.getSetting("dp_play")=='true' and windows_play==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Save to DB', '')
elapsed_time = time.time() - start_time
playing_text='Save to DB$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
url=url.replace('vidcloud.co','vcstream.to')
if url=='latest_movie':
dbcur.execute("SELECT * FROM lastlinkmovie WHERE o_name='f_name'")
match = dbcur.fetchone()
if match!=None:
f_name,name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id=match
if 'http' not in url and 'plugin' not in url and 'magnet:' not in url:
url=url.decode('base64')
elif url=='latest_tv':
dbcur.execute("SELECT * FROM lastlinktv WHERE o_name='f_name'")
match = dbcur.fetchone()
if match!=None:
f_name,name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id=match
if 'http' not in url and 'plugin' not in url and 'magnet:' not in url:
url=url.decode('base64')
if 'http' not in url and 'magnet:' not in url and not os.path.exists(url) and 'ftp:' not in url and '-sdarot-' not in o_plot and '-Sdarot-' not in o_plot and 'plugin' not in url:
url='http'+url
url=url.strip().replace('\n','').replace('\t','').replace('\r','')
if '$$$' in url:
links=url.split('$$$')
regex='\[\[(.+?)\]\]'
match=re.compile(regex).findall(str(links))
if len(match)==0:
regex='//(.+?)/'
match=re.compile(regex).findall(str(links))
ret = xbmcgui.Dialog().select("Choose", match)
if ret!=-1:
ff_link=links[ret]
regex='\[\[(.+?)\]\]'
match2=re.compile(regex).findall(links[ret])
if len(match2)>0:
ff_link=ff_link.replace(match2[0],'').replace('[','').replace(']','')
url=ff_link.strip()
else:
sys.exit()
regex='\[\[(.+?)\]\]'
match=re.compile(regex).findall(str(url))
ff_link=url
if len(match)>0:
for items in match:
ff_link=ff_link.replace(items,'').replace('[','').replace(']','')
url=ff_link.strip()
url=url.replace('[[]]','')
if '-KIDSSECTION-' not in o_plot:
if season!=None and season!="%20":
table_name='lastlinktv'
else:
table_name='lastlinkmovie'
dbcur.execute("SELECT * FROM %s WHERE url='%s'"%(table_name,url))
match = dbcur.fetchone()
if match==None:
test1=[]
test1.append((table_name,name,url.encode('base64'),iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id))
dbcur.execute("UPDATE %s SET name='%s',url='%s',iconimage='%s',fanart='%s',description='%s',data='%s',season='%s',episode='%s',original_title='%s',saved_name='%s',heb_name='%s',show_original_year='%s',eng_name='%s',isr='%s',prev_name='%s',id='%s' WHERE o_name = 'f_name'"%(table_name,name.replace("'","%27"),url.encode('base64'),iconimage,fanart,description.replace("'","%27"),str(data).replace("'","%27"),season,episode,original_title.replace("'","%27"),saved_name.replace("'","%27"),heb_name.replace("'"," "),show_original_year,eng_name.replace("'","%27").replace("'","%27"),isr,prev_name.replace("'","%27"),id))
dbcon.commit()
tmdbKey = '1248868d7003f60f2386595db98455ef'
silent_mode=True
year=data
if len (saved_name)<3:
saved_name=name
if Addon.getSetting("dp_play")=='true' and windows_play==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting IMDB', '')
elapsed_time = time.time() - start_time
playing_text='Getting IMDB$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if season!=None and season!="%20":
tv_movie='tv'
url2='http://api.themoviedb.org/3/tv/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
else:
tv_movie='movie'
url2='http://api.themoviedb.org/3/movie/%s?api_key=%s&language=en&append_to_response=external_ids'%(id,tmdbKey)
if 'tt' not in id:
try:
imdb_id=requests.get(url2).json()['external_ids']['imdb_id']
except:
imdb_id=" "
else:
imdb_id=id
url3='https://api.themoviedb.org/3/find/%s?api_key=1248868d7003f60f2386595db98455ef&language=en-US&external_source=imdb_id'%imdb_id
xx=requests.get(url3).json()
if tv_movie=='tv':
if len(xx['tv_results'])>0:
id=str(xx['tv_results'][0]['id'])
else:
if len(xx['movie_results'])>0:
id=str(xx['movie_results'][0]['id'])
if 1:#try:
video_data={}
logging.warning('Names')
logging.warning(saved_name)
fixed_name=saved_name
#fixed_name=fix_name_origin(saved_name,original_title)
logging.warning(fixed_name)
if season!=None and season!="%20":
video_data['TVshowtitle']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
video_data['mediatype']='tvshow'
else:
video_data['mediatype']='movies'
video_data['OriginalTitle']=original_title.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
video_data['title']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
video_data['poster']=fanart
video_data['fanart']=fanart
if add_ace!='':
o_plot=o_plot+add_ace
video_data['plot']=o_plot+'\n_from_Destiny_'
video_data['icon']=iconimage
video_data['year']=data
video_data['season']=season
video_data['episode']=episode
video_data['imdb']=imdb_id
video_data['code']=imdb_id
if '-HebDub-' in o_plot or '-KIDSSECTION-' in o_plot or wall_link or 'besttv1.cdn' in url:
video_data[u'mpaa']=unicode('heb')
video_data['imdbnumber']=imdb_id
video_data['imdb_id']=imdb_id
video_data['IMDBNumber']=imdb_id
video_data['genre']=imdb_id
#logging.warning(video_data)
#sys.exit()
from run import get_links
'''
c_head={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0'}
if "|" in url:
import urlparse
import mimetools
from StringIO import StringIO
headers_g2=url.split("|")[1]
c_head = dict(x.split('=') for x in headers_g2.split('&'))
link2=url.split("|")[0]
else:
link2=url
'''
if Addon.getSetting("dp_play")=='true' and windows_play==False:
elapsed_time = time.time() - start_time
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Checking Links', '')
elapsed_time = time.time() - start_time
playing_text='Checking Links$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
resolver_supporteds=cache.get(resolver_supported, 72, table='pages')
url_host_pre=re.compile('//(.+?)/').findall(url)
url_host='No'
if len(url_host_pre)>0:
url_host=url_host_pre[0]
if '.' in url_host:
url_host=url_host.split('.')[0]
try:
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
except:
host='no'
rd_domains=cache.get(get_rd_servers, 72, table='pages')
if rd_domains==None:
rd_domains=[]
if 0:#host not in rd_domains and url_host not in resolver_supporteds and 'nitroflare' not in url and 'plugin' not in url and 'magnet:' not in url and 'ftp://' not in link2:
try:
try_head = requests.head(link2,headers=c_head)
check=(try_head.status_code)
except Exception as e:
try:
try_head = requests.head(link2.replace('https','http'),headers=c_head)
check=(try_head.status_code)
except Exception as e:
check='403'
logging.warning(e)
if 'solaris' in o_plot and '- Direct' in o_name:
check='403'
if str(check) =='400' or str(check) =='404' or str(check) =='401' or str(check) =='403':
global all_links_sources
if 'http://127.0.0.1:6878/ace/getstream' in link2:
xbmcgui.Dialog().ok('Acestream Error','Opps ACESTREAM wasnt activated, Go a head and activate it...')
return 0
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Renewing temp link'.encode('utf-8'))))
regex='sss (.+?) sss'
match=re.compile(regex).findall(o_plot)[0]
check=True
try:
impmodule = __import__(match.replace('.py',''))
name1=match.replace('.py','')
except:
check=False
if check:
if len(episode)==1:
episode_n="0"+episode
else:
episode_n=episode
if len(season)==1:
season_n="0"+season
else:
season_n=season
type=[]
type,source_scraper=get_type(impmodule,name1)
items=impmodule
thread=[]
thread.append(Thread(get_links_new,hostDict,imdb_id,name1,type,items,tv_movie,original_title,name,season_n,episode_n,season,episode,show_original_year,id,premiered,False))
#thread.append(Thread(impmodule.get_links,tv_movie,original_title,heb_name,season_n,episode_n,season,episode,show_original_year,id))
thread[0].start()
start_time=time.time()
if Addon.getSetting("dp_play")=='false' and windows_play==False:
dp = xbmcgui.DialogProgress()
dp.create("Renewing links", "Please Wait", '')
elapsed_time = time.time() - start_time
playing_text='Renewing links$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
while thread[0].is_alive():
count_1080=0
count_720=0
count_480=0
count_rest=0
f_result=all_links_sources
for data in f_result:
if 'links' in f_result[data] and len (f_result[data]['links'])>0 :
for links_in in f_result[data]['links']:
name2,links,server,res=links_in
if '1080' in res:
count_1080+=1
elif '720' in res:
count_720+=1
elif '480' in res:
count_480+=1
else:
count_rest+=1
string_dp="1080: [COLOR khaki]%s[/COLOR] 720: [COLOR gold]%s[/COLOR] 480: [COLOR silver]%s[/COLOR] Rest: [COLOR burlywood]%s[/COLOR]"%(count_1080,count_720,count_480,count_rest)
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Renewing links', string_dp)
playing_text='Renewing links$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))+'\n'+string_dp
if Addon.getSetting("dp_play")=='true' and windows_play==False:
if dp.iscanceled() or elapsed_time>30:
impmodule.stop_all=1
if thread[0].is_alive():
thread[0]._Thread__stop()
break
if Addon.getSetting("dp_play")=='false' and windows_play==False:
dp.close()
all_names=[]
all_links=[]
all_q=[]
all_s=[]
all_c=[]
if name1 in f_result:
for links_in in f_result[name1]['links']:
name3,links,server,res=links_in
all_names.append(name3)
all_links.append(links)
all_q.append(res)
all_s.append(server)
all_c.append(name3+' - [COLOR gold]' +res+'[/COLOR] - '+server)
if len(all_links)>0:
if Addon.getSetting("new_source_menu")=='true':
ret=0
else:
ret = xbmcgui.Dialog().select("Choose link "+server, all_c)
if ret!=-1:
url=all_links[ret]
fixed_name=fix_name_origin(all_names[ret],original_title)
video_data['title']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
else:
return 0
else:
url=all_links[0]
fixed_name=fix_name_origin(all_names[0],original_title)
video_data['title']=fixed_name.replace('%20',' ').replace('%3a',':').replace('%27',"'").replace('_',".")
link='OK'
was_error=0
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting direct link', '')
playing_text='Getting direct link$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
logging.warning('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
logging.warning('allow_debrid: '+str(allow_debrid))
if ('magnet' in url or 'limetorrents' in url or '1337x.st' in url or 'ibit.to' in url or 'torrentdownloads.me' in url or 'torrentquest.com' in url or 'eztv.io' in url) and not allow_debrid :
link='OK'
logging.warning('Check magnet Player')
get_torrent_file(silent_mode=True)
else :
if windows_play and auto_fast:
link=get_links(url)
else:
try:
link=get_links(url)
except Exception as e:
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Bad link try another')))
pass
if 1:
if link=='error' and was_error==0 :
news='''\
Error In Play
Source : %s,
Name:%s
Episode:%s
season:%s
link:%s
Error:%s
location:%s
server:%s
'''
sendy(news%(o_name,original_title,season,episode,url,e,str(lineno),o_plot),'error Des','Des')
playing_text='Error:'+str(e)+'$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if windows_play==False:
window = whats_new('Oops','https://i.gifer.com/ItfD.gif',news%(o_name,original_title,season,episode,url,e,str(lineno),o_plot))
window.doModal()
del window
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.close()
return 0
set_runtime=''
set_total=''
info = {'title': id, 'season': season, 'episode': episode}
mag_start_time='0'
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Checking Last Played Location', '')
playing_text='Checking Last Played Location$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if auto_play==False and Addon.getSetting("adv_watched")=='true' and id!='%20':
AWSHandler.UpdateDB()
res = AWSHandler.CheckWS(info)
if res:
if not res['wflag']:
if res['resumetime']!=None:
choose_time='Continue from '+time.strftime("%H:%M:%S", time.gmtime(float(res['resumetime'])))
#ret = xbmcgui.Dialog().select("Choose", choose_time)
window = selection_time('Menu',choose_time)
window.doModal()
selection = window.get_selection()
del window
if selection==-1:
return 0
if selection==0:
mag_start_time=res['resumetime']
set_runtime=res['resumetime']
set_total=res['totaltime']
#listItem.setProperty('resumetime', res['resumetime'])
#listItem.setProperty('totaltime', res['totaltime'])
elif selection==1:
mag_start_time='0'
set_runtime='0'
set_total=res['totaltime']
#listItem.setProperty('resumetime', '0')
#listItem.setProperty('totaltime', res['totaltime'])
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
if 'magnet' in url or 'limetorrents' in url or '1337x.st' in url or 'ibit.to' in url or 'torrentdownloads.me' in url or 'torrentquest.com' in url or 'eztv.io' in url:
if 'limetorrents' in url:
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting Magnet Link', '')
playing_text='Getting Magnet Link$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
x=requests.get(url,headers=headers).content
regex='"magnet:(.+?)"'
url='magnet:'+re.compile(regex).findall(x)[0]
if '1337x.st' in url :
x,cook=cloudflare_request('http://www.1337x.st/',headers=base_header)
x=requests.get(url,headers=cook[1],cookies=cook[0]).content
regex='"magnet:(.+?)"'
url='magnet:'+re.compile(regex).findall(x)[0]
if 'torrentquest.com' in url or 'eztv.io' in url:
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting Magnet Link', '')
playing_text='Getting Magnet Link$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
x=requests.get(url,headers=headers).content
regex='"magnet:(.+?)"'
url='magnet:'+re.compile(regex).findall(x)[0]
if 'torrentdownloads.me' in url:
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting Magnet Link', '')
playing_text='Getting Magnet Link$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
x=requests.get(url,headers=headers).content
regex='"magnet:(.+?)"'
url='magnet:'+re.compile(regex).findall(x)[0]
if 'ibit.to' in url:
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Getting Magnet Link', '')
playing_text='Getting Magnet Link$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
}
x=requests.get(url,headers=headers).content
regex="'magnet:(.+?)'"
url='magnet:'+re.compile(regex).findall(x)[0].decode("string-escape").replace('X-X','')
'''
if season!=None and season!="%20" and '-KIDSSECTION-' not in o_plot:
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Starting search next episode', '')
playing_text='Starting search next episode$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
time_to_save=int(Addon.getSetting("save_time"))
fav_status='false'
thread=[]
thread.append(Thread(get_next_ep_links,original_title,year,season,str(int(episode)+1),id,eng_name,show_original_year,heb_name,isr,fav_status))
thread[0].start()
'''
if season!=None and season!="%20":
prev_name=original_title
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(0, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),'Save to DB magnet', '')
playing_text='Save to DB Magnet$$$$'+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
dbcur.execute("DELETE FROM sources")
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(100, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),"It's in Kodi's Hands Now", '')
playing_text="It's in Kodi's Hands Now$$$$"+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
if allow_debrid:
logging.warning('LOADING TORRENT')
import real_debrid
#real_debrid.RealDebrid().auth()
rd = real_debrid.RealDebrid()
try:
if url.endswith('.torrent') and 'magnet:' not in url:
link=rd.addtorrent(url)
else:
logging.warning('LOADING Single TORRENT')
link=rd.singleMagnetToLink(url)
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
except Exception as e:
logging.warning('RD failed')
logging.warning(e)
get_torrent_file(silent_mode=True)
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'RD Failed...Trying Free'.encode('utf-8')+str(e))))
if Addon.getSetting("players_new")!='7':
link=get_torrent_link(url)
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
if link==url:
if Addon.getSetting("subtitles")=='true' and 'tt' in video_data['imdb']:
thread=[]
thread.append(Thread(start_subs, video_data['OriginalTitle'], video_data['imdb'], video_data['season'], video_data['episode'],video_data['title']))
thread[0].start()
resolve_magnet(url,listItem,AWSHandler,info,mag_start_time)
xbmc.sleep(500)
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
return 'ok'
else:
logging.warning('Resolve free magnet')
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
if Addon.getSetting("dp_play")=='true':
dp.close()
logging.warning('Resolve free magnet:'+video_data['imdb'])
if Addon.getSetting("subtitles")=='true' and 'tt' in video_data['imdb']:
logging.warning('Start subs torrent')
thread=[]
thread.append(Thread(start_subs, video_data['OriginalTitle'], video_data['imdb'], video_data['season'], video_data['episode'],video_data['title']))
thread[0].start()
resolve_magnet(url,listItem,AWSHandler,info,mag_start_time)
xbmc.sleep(500)
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
return 'ok'
else:
if Addon.getSetting("players_new")!='7':
link=get_torrent_link(url)
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
if link==url:
if Addon.getSetting("subtitles")=='true' and 'tt' in video_data['imdb']:
thread=[]
thread.append(Thread(start_subs, video_data['OriginalTitle'], video_data['imdb'], video_data['season'], video_data['episode'],video_data['title']))
thread[0].start()
resolve_magnet(url,listItem,AWSHandler,info,mag_start_time)
xbmc.sleep(500)
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
return 'ok'
else:
listItem = xbmcgui.ListItem(video_data['title'], path=link)
listItem.setInfo(type='Video', infoLabels=video_data)
listItem.setProperty('IsPlayable', 'true')
listItem.setProperty('resumetime', set_runtime)
listItem.setProperty('totaltime', set_total)
if Addon.getSetting("dp_play")=='true' and windows_play==True:
dp.close()
logging.warning('Magnet Resolved')
if Addon.getSetting("subtitles")=='true' and 'tt' in video_data['imdb']:
thread=[]
logging.warning('in MAgnet Resolved')
thread.append(Thread(start_subs, video_data['OriginalTitle'], video_data['imdb'], video_data['season'], video_data['episode'],video_data['title']))
thread[0].start()
resolve_magnet(url,listItem,AWSHandler,info,mag_start_time)
xbmc.sleep(500)
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
return 'ok'
elapsed_time = time.time() - start_time
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.update(100, ' Please Wait '+ time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),"It's in Kodi's Hands Now", '')
playing_text="It's in Kodi's Hands Now$$$$"+time.strftime("%H:%M:%S", time.gmtime(elapsed_time))
logging.warning('PLAYING NOW33')
logging.warning(windows_play)
logging.warning(auto_play)
if windows_play:
mag_start_time_new=set_runtime
if nextup:
ok=xbmc.Player().play(link,listitem=listItem,windowed=False)
else:
ok=xbmc.Player().play(link,listitem=listItem,windowed=True)
else:
if auto_play==True:
logging.warning('PLAYING NOW22')
ok=xbmc.Player().play(link,listitem=listItem)
if f_auto_play==False:
ok=xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listItem)
else:
ok=xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listItem)
if Addon.getSetting("subtitles")=='true' and 'tt' in video_data['imdb']:
thread=[]
thread.append(Thread(start_subs, video_data['OriginalTitle'], video_data['imdb'], video_data['season'], video_data['episode'],video_data['title']))
thread[0].start()
if wall_link and subtitlesUrl:
x=0
while not xbmc.Player().isPlaying() and x<1000:
xbmc.sleep(10) #wait until video is being played
x+=1
xbmc.sleep(50)
xbmc.Player().setSubtitles(subtitlesUrl)
if Addon.getSetting("dp_play")=='true' and windows_play==False:
dp.close()
if Addon.getSetting("play_first")!='true':
playing_text=''
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
AWSHandler.QueueWS(info)
xbmc.sleep(1000)
if 1:#xbmc.Player().isPlaying():
if Addon.getSetting("use_trak")=='true' and len(id)>1 and id!='%20':
if season!=None and season!="%20":
season_t, episode_t = int('%01d' % int(season)), int('%01d' % int(episode))
i = (post_trakt('/sync/watchlist', data={"shows": [{"seasons": [{"episodes": [{"number": episode_t}], "number": season_t}], "ids": {"tmdb": id}}]}))
else:
i = (post_trakt('/sync/watchlist',data= {"movies": [{"ids": {"tmdb": id}}]}))
try:
if season!=None and season!="%20" and '-KIDSSECTION-' not in o_plot:
time_to_save=int(Addon.getSetting("save_time"))
fav_search_f=Addon.getSetting("fav_search_f_tv")
fav_servers_en=Addon.getSetting("fav_servers_en_tv")
fav_servers=Addon.getSetting("fav_servers_tv")
if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0 ):
fav_status='true'
else:
fav_status='false'
thread=[]
thread.append(Thread(get_nex_ep, time_to_save, original_title,year,season,str(int(episode)+1),id,eng_name,show_original_year,heb_name,isr,False,fav_status,prev_name,url,iconimage,fanart,o_plot))
thread[0].start()
#match_a,a,b,f_subs= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,str(int(episode)+1),id,eng_name,show_original_year,heb_name,isr,False,fav_status,'no','0',table='pages')
if fav_status=='true':
logging.warning('searching next_ep rest')
#match_a= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,str(int(episode)+1),id,eng_name,show_original_year,heb_name,isr,False,'rest','no','0', table='pages')
thread.append(Thread(get_nex_ep, time_to_save, original_title,year,season,str(int(episode)+1),id,eng_name,show_original_year,heb_name,isr,False,'rest',prev_name,url,iconimage,fanart,o_plot))
thread[1].start()
logging.warning('Done Prep')
except Exception as e:
logging.warning('ERRORRRRRRRRRRRRRRR: '+str(e))
pass
if season!=None and season!="%20":
prev_name=original_title
dbcur.execute("DELETE FROM sources")
dbcur.execute("INSERT INTO sources Values ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (prev_name.replace("'","%27"),url,iconimage,fanart,o_plot.replace("'","%27"),year,season,episode,original_title.replace("'","%27"),heb_name.replace("'","%27"),show_original_year,eng_name.replace("'","%27"),isr,id))
dbcur.execute("DELETE FROM nextup")
if season!=None and season!="%20" and Addon.getSetting('nextup')=='true':
dbcur.execute("INSERT INTO nextup Values ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (prev_name.replace("'","%27"),url,iconimage,fanart,o_plot.replace("'","%27"),year,season,str(int(episode)+1),original_title.replace("'","%27"),heb_name.replace("'","%27"),show_original_year,eng_name.replace("'","%27"),isr,id))
logging.warning("INSERT INTO nextup Values ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s');" % (prev_name.replace("'","%27"),url,iconimage,fanart,o_plot.replace("'","%27"),year,season,str(int(episode)+1),original_title.replace("'","%27"),heb_name.replace("'","%27"),show_original_year,eng_name.replace("'","%27"),isr,id))
dbcon.commit()
logging.warning('DONE ALL')
xbmc.sleep(1000)
if 'plugin.video.f4mTester' in url:
xbmc.executebuiltin('Dialog.Close(all, true)')
xbmc.executebuiltin('Dialog.Close(okdialog, true)')
#xbmc.executebuiltin('ReloadSkin()')
xbmc.executebuiltin("Dialog.Close(busydialog)")
return 'ok'
except Exception as e:
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
logging.warning('ERROR IN Play:'+str(lineno))
logging.warning('inline:'+line)
logging.warning('Error:'+str(e))
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Error', 'inLine:'+str(lineno))).encode('utf-8'))
done_nextup=1
marked_trk=1
def last_played_c():
    """Build the 'continue watching' menu.

    Adds up to three entries: the last movie link and last TV-episode link
    (read from the local `lastlinkmovie` / `lastlinktv` tables) plus a static
    'Latest Sources' entry.  Relies on the module-level `dbcur`, `addLink`
    and `addNolink` helpers.
    """
    # Bugfix: `url` was previously unbound when neither table contained a
    # row, so the final addNolink() call raised NameError.
    url = ' '
    dbcur.execute("SELECT * FROM lastlinkmovie WHERE o_name='f_name'")
    match = dbcur.fetchone()
    if match != None:
        f_name, name, url, iconimage, fanart, description, data, season, episode, original_title, saved_name, heb_name, show_original_year, eng_name, isr, prev_name, id = match
        try:
            if url != ' ':
                if 'http' not in url:
                    # Stored links may be base64-obfuscated (Python 2 codec).
                    url = url.decode('base64')
                addLink('[COLOR gold]Last Movie Link[/COLOR]', 'latest_movie', 5, False, iconimage, fanart, description, data=show_original_year, original_title=original_title, season=season, episode=episode, id=id, saved_name=saved_name, prev_name=prev_name, eng_name=eng_name, heb_name=heb_name, show_original_year=show_original_year)
        except Exception as e:
            # Best-effort: a malformed row must not break the whole menu.
            logging.warning(e)
    dbcur.execute("SELECT * FROM lastlinktv WHERE o_name='f_name'")
    match = dbcur.fetchone()
    if match != None:
        f_name, name, url, iconimage, fanart, description, data, season, episode, original_title, saved_name, heb_name, show_original_year, eng_name, isr, prev_name, id = match
        try:
            if url != ' ':
                if 'http' not in url:
                    url = url.decode('base64')
                addLink('[COLOR gold]Latest Show Link[/COLOR]'.decode('utf8'), 'latest_tv', 5, False, iconimage, fanart, description, data=show_original_year, original_title=original_title, season=season, episode=episode, id=id, saved_name=saved_name, prev_name=prev_name, eng_name=eng_name, heb_name=heb_name, show_original_year=show_original_year)
        except Exception as e:
            logging.warning(e)
    addNolink('[COLOR gold]Latest Sources[/COLOR]'.encode('utf8'), url, 75, False, iconimage='https://ak6.picdn.net/shutterstock/videos/13058996/thumb/1.jpg', fanart='https://pixelz.cc/wp-content/uploads/2018/06/the-last-of-us-ellie-and-joel-uhd-4k-wallpaper.jpg')
def display_results(url):
    """Show a per-provider summary dialog of scraped source links.

    *url* is a JSON string mapping provider name -> {'links': [(name, link,
    server, quality), ...], ...}.  Providers disabled in settings (or the
    special 'subs' entry) are skipped; enabled providers are summarised by
    quality bucket, with empty providers listed last as NOT FOUND.
    """
    sources = json.loads(url)
    found_lines = ''
    missing_lines = ''
    for provider in sources:
        if provider == 'subs' or Addon.getSetting(provider) != 'true':
            continue
        counts = {'1080': 0, '720': 0, '480': 0, 'rest': 0}
        for _name, _link, _server, quality in sources[provider]['links']:
            if '1080' in quality:
                counts['1080'] += 1
            elif '720' in quality:
                counts['720'] += 1
            elif '480' in quality:
                counts['480'] += 1
            else:
                counts['rest'] += 1
        if len(sources[provider]['links']) > 0:
            summary = "1080: [COLOR khaki]%s[/COLOR] 720: [COLOR gold]%s[/COLOR] 480: [COLOR silver]%s[/COLOR] Rest: [COLOR burlywood]%s[/COLOR]" % (counts['1080'], counts['720'], counts['480'], counts['rest'])
            found_lines = found_lines + provider + ' : ' + summary + '\n'
        else:
            missing_lines = missing_lines + provider + ' : [COLOR red]NOT FOUND[/COLOR]' + '\n'
    showText('Results', found_lines + missing_lines)
def get_m3u8():
    """List all configured M3U/M3U8 playlists.

    Sources, in order: up to five address/name pairs from the addon settings,
    then every file found in `m3_path` and `m3_dir`.
    """
    icon = 'http://cdn.marketplaceimages.windowsphone.com/v8/images/31edc250-11db-47c3-ad08-712fb1082435?imageType=ws_icon_large'
    fanart = 'https://blog.keycdn.com/blog/wp-content/uploads/2014/11/live-streaming-1-768x384.png'
    addNolink('[COLOR gold][I]To Activate Using Proxy, Add Proxy in the File Name[/I][/COLOR]', 'www', 999, False, iconimage=icon, fanart=fanart)
    # Settings-defined playlists (slots 0..4).
    for slot in range(0, 5):
        label = Addon.getSetting("M3u8_name-" + str(slot))
        address = Addon.getSetting("M3u8_addr-" + str(slot))
        if len(label) > 0 and 'http' in address:
            addDir3(label.decode('utf8').replace('.m3u', ''), address, 56, icon, fanart, label.decode('utf8').replace('.m3u8', ''))
    # Local playlist files from both configured folders.
    for folder in (m3_path, m3_dir):
        for entry in [f for f in listdir(folder) if isfile(join(folder, f))]:
            addDir3(entry.decode('utf8').replace('.m3u', ''), os.path.join(folder, entry).encode('utf8'), 56, icon, fanart, entry.decode('utf8').replace('.m3u8', ''))
def m3u8_cont(name, url):
    """Render the entries of one M3U/M3U8 playlist.

    *url* is either an http(s) address or a local file path (utf8-encoded
    bytes).  Large playlists are split into 'Group - N' sub-folders when the
    `group_m3u` setting is enabled; otherwise each #EXTINF/URL pair becomes a
    playable entry.  If 'proxy' appears in *name*, links are wrapped in the
    f4mTester TSDOWNLOADER proxy URL.
    """
    if 'http' in url:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        file_data = requests.get(url, headers=headers).content.replace('\n\n', '\n').replace('\r', '').replace('\t', '').split('\n')
    else:
        s_file = url.decode('utf8')
        file = open(s_file, 'r')
        file_data = file.readlines()
        file.close()
    if len(file_data) > 200 and Addon.getSetting("group_m3u") == 'true':
        # ~200 raw lines = ~100 EXTINF/URL pairs, matching get_group_m3u8's
        # 100-entry pages.  `//` keeps Python 2 semantics and is py3-safe.
        for r in range(0, len(file_data) // 200):
            addDir3('Group - ' + str(r), url, 66, iconimage='http://cdn.marketplaceimages.windowsphone.com/v8/images/31edc250-11db-47c3-ad08-712fb1082435?imageType=ws_icon_large', fanart='https://blog.keycdn.com/blog/wp-content/uploads/2014/11/live-streaming-1-768x384.png', description=str(r))
    else:
        display_name = ' '  # guard: playlist may start with a bare URL line
        icon = ' '
        for data in file_data:
            if '#EXTINF' in data:
                line_d = data.split(",")
                # Bugfix: the original indexed line_d[1] BEFORE checking
                # len(line_d)>1, so a comma-less EXTINF line raised IndexError.
                display_name = line_d[1] if len(line_d) > 1 else data
                icon = ' '
                if 'tvg-logo' in line_d[0]:
                    logo_match = re.compile(' tvg-logo=(.+?)"').findall(line_d[0])
                    if len(logo_match) > 0:
                        icon = logo_match[0].replace('"', '')
            elif 'http' in data:
                play_url = data
                if 'proxy' in name:
                    play_url = 'plugin://plugin.video.f4mTester/?streamtype=TSDOWNLOADER&url=' + play_url
                addLink(display_name, play_url, 5, False, iconimage=icon, fanart=icon, description=display_name + '__music__')
def get_group_m3u8(url, plot):
    """Render one 100-entry page of a grouped M3U/M3U8 playlist.

    *url* is an http(s) address or a local file path (utf8-encoded bytes);
    *plot* is the page index produced by m3u8_cont's grouping.  Only link
    entries numbered plot*100 .. plot*100+100 (inclusive) are listed.
    """
    if 'http' in url:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
        }
        file_data = requests.get(url, headers=headers).content.replace('\n\n', '\n').replace('\r', '').replace('\t', '').split('\n')
    else:
        s_file = url.decode('utf8')
        file = open(s_file, 'r')
        file_data = file.readlines()
        file.close()
    start = int(plot) * 100
    end = int(plot) * 100 + 100
    counter = 0
    display_name = ' '  # guard: playlist may start with a bare URL line
    icon = ' '
    for data in file_data:
        if '#EXTINF' in data:
            line_d = data.split(",")
            # Bugfix: the original indexed line_d[1] BEFORE checking
            # len(line_d)>1, so a comma-less EXTINF line raised IndexError.
            display_name = line_d[1] if len(line_d) > 1 else data
            icon = ' '
            if 'tvg-logo' in line_d[0]:
                logo_match = re.compile(' tvg-logo=(.+?)"').findall(line_d[0])
                if len(logo_match) > 0:
                    icon = logo_match[0].replace('"', '')
        elif 'http' in data:
            # `counter <= end` is intentionally inclusive (kept from the
            # original): page boundaries overlap by one entry.
            if counter >= start and counter <= end:
                addLink(display_name, data, 5, False, iconimage=icon, fanart=icon, description=display_name + '__music__')
            counter += 1
def fix_data(data):
    """Sanitise a metadata string.

    Square brackets and pre-existing spaces are removed entirely, while
    backslashes, newlines, carriage returns and tabs each become a single
    space.  (The original chained str.replace calls; this is the equivalent
    single character-wise pass.)
    """
    cleaned = []
    for ch in data:
        if ch in '[] ':
            continue
        if ch in '\\\n\r\t':
            cleaned.append(' ')
        else:
            cleaned.append(ch)
    return ''.join(cleaned)
def eng_anim():
    """Top-level menu for the English-dubbed animation section."""
    entries = [
        ('Cartoon', 'http://api.animetoon.tv/GetAllCartoon', 59,
         'https://png.pngtree.com/element_pic/00/16/12/07584794601cb2b.jpg',
         'https://wallpapersite.com/images/pages/pic_w/2604.jpg',
         'Cartoon'),
        ('Anime', 'http://api.animetoon.tv/GetAllDubbed', 59,
         'https://i.imgur.com/e4Crf1p.jpg',
         'http://www.tokkoro.com/picsup/2618741-anime-4k-full-desktop-wallpaper.jpg',
         'Anime'),
        ('[COLOR aqua][I]Search[/I][/COLOR]', 'search', 62,
         'https://upload.wikimedia.org/wikipedia/commons/0/0e/Wikipe-tan_sailor_fuku.png',
         'https://worldwithouthorizons.com/wp-content/uploads/Artsy-2016-4K-Anime-Wallpaper-1280x720.jpg',
         'Search'),
    ]
    for title, link, mode, icon, fanart, desc in entries:
        addDir3(title.decode('utf8'), link, mode, icon, fanart, desc.decode('utf8'))
def download_img(local_filename, cook, url):
    """Stream *url* into *local_filename* unless the file already exists.

    *cook* is a pair — cook[0] holds the cookies, cook[1] the request
    headers.  Returns 0 when the file already exists, otherwise the path
    written.
    """
    if os.path.exists(local_filename):
        return 0
    response = requests.get(url, headers=cook[1], cookies=cook[0], stream=True)
    with open(local_filename, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            # Skip keep-alive heartbeat chunks, which arrive empty.
            if chunk:
                out.write(chunk)
    return local_filename
def next_anime(url):
    """List every series returned by an animetoon API endpoint (*url*).

    Each item becomes a folder entry (mode 60) carrying poster/plot/year/
    rating metadata for the skin.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    # Bugfix: `headers` was passed positionally, which lands in requests.get's
    # `params` slot — the dict was sent as a query string, not as HTTP headers.
    x = requests.get(url, headers=headers).json()
    for items in x:
        if items['description'] != None:
            plot = items['description']
        else:
            plot = ' '
        poster = 'http://www.animetoon.tv/images/series/big/' + items['id'] + '.jpg'
        video_data = {}
        video_data['title'] = items['name']
        video_data['poster'] = poster
        video_data['plot'] = plot
        video_data['icon'] = poster
        if items['released'] != None:
            video_data['year'] = items['released'].split('-')[0]
        # NOTE(review): assumes the API's 'rating' field is always numeric and
        # non-null — confirm against the endpoint before hardening further.
        video_data['rating'] = str(int(items['rating']))
        addDir3(items['name'], items['id'], 60, poster, poster, plot, video_info=video_data)
def anime_ep(url, image):
    """List the episodes of one animetoon series (*url* is the series id)."""
    api_headers = {
        'App-Version': '8.0',
        'App-Name': '#Toonmania',
        'App-LandingPage': 'http://www.mobi24.net/toon.html',
        'Host': 'api.animetoon.tv',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'utf-8',
        'User-Agent': 'okhttp/2.3.0'
    }
    details = requests.get('http://api.animetoon.tv/GetDetails/' + url, headers=api_headers).json()
    for episode in details['episode']:
        meta = {
            'title': episode['name'],
            'poster': image,
            'plot': episode['name'],
            'icon': image,
        }
        if episode['date'] is not None:
            meta['year'] = episode['date'].split('-')[0]
        addLink(episode['name'], episode['id'], 61, False, image, image, episode['name'], video_info=json.dumps(meta))
def play_anime(name, url, iconimage):
    """Resolve and play one animetoon episode (*url* is the episode id).

    Fetches the mirror list, lets the user pick a host, scrapes the chosen
    page for the direct file link and hands it to Kodi via setResolvedUrl.
    """
    api_headers = {
        'App-Version': '8.0',
        'App-Name': '#Toonmania',
        'App-LandingPage': 'http://www.mobi24.net/toon.html',
        'Host': 'api.animetoon.tv',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'utf-8',
        'User-Agent': 'okhttp/2.3.0'
    }
    x = requests.get('http://api.animetoon.tv/GetVideos/' + url, headers=api_headers).json()
    all_hosts = []
    all_links = []
    for items in x:
        host = re.compile('//(.+?)/').findall(items[0])[0]
        all_hosts.append(host)
        all_links.append(items[0])
    browser_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    ret = xbmcgui.Dialog().select("Choose", all_hosts)
    if ret != -1:
        r = requests.get(all_links[ret], headers=browser_headers)
        logging.warning(all_links[ret])
        html = r.text
        # Try the generic videozoo/play44 pattern first, then the variants.
        # (The pattern requires the 'var video_links' literal, so trying it
        # unconditionally is safe and replaces the original's branch.)
        temp = re.findall(r'''var video_links.*?['"]link['"]\s*?:\s*?['"](.*?)['"]''', html, re.DOTALL)
        if not temp:
            temp = re.findall(r'''{\s*?url\s*?:\s*?['"](.*?)['"]''', html, re.DOTALL)
        if not temp:
            temp = re.findall(r'''file\s*?:\s*?['"](.*?)['"]''', html, re.DOTALL)
        if not temp:
            # Bugfix: the original indexed temp[0] unconditionally and raised
            # IndexError when no pattern matched; fail gracefully instead.
            xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'No playable link found')))
            return
        f_link = temp[0].replace(r'\/', r'/')  # unescape JS slashes if present
        logging.warning(f_link)
        video_data = {}
        video_data['title'] = name
        video_data['poster'] = iconimage
        video_data['icon'] = iconimage
        listItem = xbmcgui.ListItem(video_data['title'], path=f_link)
        listItem.setInfo(type='Video', infoLabels=video_data)
        listItem.setProperty('IsPlayable', 'true')
        ok = xbmcplugin.setResolvedUrl(handle=int(sys.argv[1]), succeeded=True, listitem=listItem)
def search_anime():
    """Prompt for a search term and list matching cartoon/anime series.

    Searches both animetoon catalogues (case-insensitive substring match on
    the series name).  If the keyboard is cancelled the term stays empty and
    — as in the original — everything matches.
    """
    search_entered = ''
    keyboard = xbmc.Keyboard(search_entered, 'Enter search')
    keyboard.doModal()
    if keyboard.isConfirmed():
        search_entered = keyboard.getText()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    needle = search_entered.lower()
    all_r = []
    # Bugfix: `headers` was previously passed positionally (into requests.get's
    # `params` slot), so it was sent as a query string, not as HTTP headers.
    for endpoint in ('http://api.animetoon.tv/GetAllCartoon', 'http://api.animetoon.tv/GetAllDubbed'):
        for items in requests.get(endpoint, headers=headers).json():
            if needle in items['name'].lower():
                if items['description'] != None:
                    plot = items['description']
                else:
                    plot = ' '
                all_r.append((items['name'], 'http://www.animetoon.tv/images/series/big/' + items['id'] + '.jpg', items['id'], plot))
    for title, icon, series_id, plot in all_r:
        video_data = {}
        video_data['title'] = title
        video_data['poster'] = icon
        video_data['plot'] = plot
        video_data['icon'] = icon
        addDir3(title, series_id, 60, icon, icon, plot, video_info=video_data)
def add_remove_trakt(name, original_title, id, season, episode):
    """Add or remove a Trakt watched-history entry, then refresh the view.

    name            -- 'tv' for episodes, anything else means movie
    original_title  -- 'add' or 'remove' (the action, despite the name)
    id              -- TMDb id; season/episode used only for 'tv'
    """
    # Bugfix: `i` was unbound (NameError at the `'added' in i` check) when
    # original_title was neither 'add' nor 'remove'; '' now routes that case
    # to the generic error notification.
    i = ''
    if original_title == 'add':
        if name == 'tv':
            season_t, episode_t = int('%01d' % int(season)), int('%01d' % int(episode))
            i = (post_trakt('/sync/history', data={"shows": [{"seasons": [{"episodes": [{"number": episode_t}], "number": season_t}], "ids": {"tmdb": id}}]}))
        else:
            i = (post_trakt('/sync/history', data={"movies": [{"ids": {"tmdb": id}}]}))
    elif original_title == 'remove':
        if name == 'tv':
            season_t, episode_t = int('%01d' % int(season)), int('%01d' % int(episode))
            i = (post_trakt('/sync/history/remove', data={"shows": [{"seasons": [{"episodes": [{"number": episode_t}], "number": season_t}], "ids": {"tmdb": id}}]}))
        else:
            i = (post_trakt('/sync/history/remove', data={"movies": [{"ids": {"tmdb": id}}]}))
    if 'added' in i:
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Marked as Watched'.encode('utf-8'))))
    elif 'deleted' in i:
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Watched Removed'.encode('utf-8'))))
    else:
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Error Something Went Wrong'.encode('utf-8'))))
    xbmc.executebuiltin('Container.Refresh')
def download_file(url):
    """Hand a resolved stream link to Internet Download Manager (Windows only).

    Resolves *url* via run.get_links, then shells out to idman.exe from the
    folder configured in the 'idm_folder' setting.  The 'dialog_idm' setting
    selects IDM's silent mode (/n).
    """
    from run import get_links
    idm_folder=Addon.getSetting("idm_folder")
    # Normalise: drop a single trailing backslash so path joins behave.
    if idm_folder.endswith('\\'):
        idm_folder=idm_folder[:-1]
    # o_folder: the unquoted path, used only for the existence check below.
    o_folder=os.path.join(idm_folder,'idman.exe')
    split_folder=idm_folder.split('\\')
    f_folder=''
    c=0
    # Build a shell-safe variant of the path: the drive component (first
    # element) stays bare, every later component is wrapped in double quotes
    # so os.system copes with spaces (e.g. C:\"Program Files"\...).
    for item in split_folder:
        if c==0:
            c=1
            f_folder=f_folder+item+'\\'
        else:
            f_folder=f_folder+'"'+item+'"'+'\\'
    idm_path=os.path.join(f_folder,'idman.exe')
    if not os.path.exists(o_folder):
        xbmcgui.Dialog().ok('Error Occurred',"IDM Wasn't Installed or Wrong Directory in Settings")
        sys.exit()
    f_link=get_links(url)
    # NOTE(review): f_link is interpolated into an os.system command line —
    # a link containing a double quote could break out of the argument
    # (command-injection risk). Consider subprocess.call with a list.
    if Addon.getSetting("dialog_idm")=='true':
        # /n = start the download without IDM's confirmation dialog.
        os.system(idm_path+' /d "%s" /n'%f_link)
    else:
        os.system(idm_path+' /d "%s"'%f_link)
def cartoon():
    """Build the top-level category menu scraped from watchcartoononline.io's
    navigation bar, skipping the Home and Contact entries."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    page = requests.get('https://www.watchcartoononline.io/', headers=headers).content
    nav_block = re.compile('<ul id="nav">(.+?)</ul>', re.DOTALL).findall(page)
    entries = re.compile('<li><a href="(.+?)">(.+?)</a></li>').findall(nav_block[0])
    icon = 'http://www.cartoon-media.eu/files/library/Cartoon-Movie/2018/JungleBunch_square.jpg?thumb=media-pt'
    fanart = 'http://digitalspyuk.cdnds.net/16/31/980x490/landscape-1470221630-cartoon-heroes.jpg'
    for link, name in entries:
        if name != 'Home' and name != 'Contact':
            addDir3(name.decode('utf8'), link, 69, icon, fanart, name.decode('utf8'))
def cartoon_list(url):
    """List the contents of one watchcartoononline category page.

    Scrapes the '#' alphabetical section; depending on which regex matches,
    entries become either playable links (mode 5) or sub-folders (mode 70).
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x=requests.get(url,headers=headers).content
    # Grab everything from the '#' section marker to the closing </div>.
    regex='<a name="#"></a><p class="sep">#</p><ul>(.+?)</div>'
    match_pre=re.compile(regex,re.DOTALL).findall(x)
    # First try anchors that carry a title attribute...
    regex='<li><a href="(.+?)" title="(.+?)"'
    match=re.compile(regex).findall(match_pre[0])
    if len(match)==0:
        # ...fall back to plain anchors; those pages hold direct episode
        # links, so they are added as playable items.
        regex='<li><a href="(.+?)">(.+?)</a></li>'
        match=re.compile(regex).findall(match_pre[0])
        for link,title in match:
            addLink(title,link,5,False,iconimage='http://www.cartoon-media.eu/files/library/Cartoon-Movie/2018/JungleBunch_square.jpg?thumb=media-pt',fanart='http://digitalspyuk.cdnds.net/16/31/980x490/landscape-1470221630-cartoon-heroes.jpg',description=title)
    else:
        # Titled anchors are series pages: add them as browseable folders.
        for link,name in match:
            if name!='Home' and name!='Contact':
                addDir3(name.decode('utf8'),link,70,'http://www.cartoon-media.eu/files/library/Cartoon-Movie/2018/JungleBunch_square.jpg?thumb=media-pt','http://digitalspyuk.cdnds.net/16/31/980x490/landscape-1470221630-cartoon-heroes.jpg',name.decode('utf8'))
def cartoon_episodes(url):
    """List the episodes of one watchcartoononline series page (*url*)."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x = requests.get(url, headers=headers).content
    image_p = re.compile('<meta property="og:image" content="(.+?)"').findall(x)
    if len(image_p) > 0:
        image = image_p[0]
    else:
        image = ' '
    # Bugfix: the original re-ran the og:image pattern here (a stale `regex`
    # variable instead of the intended `regex_pre`) and indexed [0]
    # unconditionally — raising IndexError on pages without that meta tag —
    # then immediately overwrote the result. The dead lookup is removed.
    m_p = re.compile('li><a href="(.+?)" rel="bookmark" title=".+?" class="sonra">(.+?)<').findall(x)
    logging.warning(url)
    for link, title in m_p:
        addLink(title, link, 5, False, iconimage=image, fanart=image, description=title)
def by_actor(url):
    """List one page of TMDb's popular-people feed as browseable folders.

    *url* is the page number ('www' means page 1).  Each person becomes a
    folder (mode 73) and a Next Page entry is appended.
    """
    if url == 'www':
        url = '1'
    link = 'https://api.themoviedb.org/3/person/popular?api_key=1248868d7003f60f2386595db98455ef&language=en-US&page=%s&language=en&sort_by=popularity.desc' % url
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    html = requests.get(link, headers=headers).json()
    for items in html['results']:
        icon = items['profile_path']
        # Robustness: 'known_for' can be an empty list — the original raised
        # IndexError; fall back to the blank-art path instead.
        if items.get('known_for'):
            fanart = items['known_for'][0].get('backdrop_path')
        else:
            fanart = None
        if icon == None:
            icon = ' '
        else:
            icon = domain_s + 'image.tmdb.org/t/p/original/' + icon
        if fanart == None:
            fanart = ' '
        else:
            fanart = domain_s + 'image.tmdb.org/t/p/original/' + fanart
        addDir3(items['name'], str(items['id']), 73, icon, fanart, items['name'])
    addDir3('[COLOR aqua][I]Next Page[/COLOR][/I]', str(int(url) + 1), 72, ' ', ' ', '[COLOR aqua][I]Next Page[/COLOR][/I]')
def actor_m(url):
    """List a person's movie or TV credits (*url* is a TMDb person id).

    Asks the user to choose between TV shows and movies, fetches the matching
    credits endpoint plus the genre table, and adds one entry per credit
    (mode 4 for movies, mode 7 for TV) with full metadata.  Exits if the
    selection dialog is cancelled.
    """
    choise = ['Tv Shows', 'Movies']
    ret = xbmcgui.Dialog().select("Choose", choise)
    if ret == -1:
        sys.exit()
    if ret == 0:
        tv_mode = 'tv'
    else:
        tv_mode = 'movie'
    if tv_mode == 'movie':
        link = 'https://api.themoviedb.org/3/person/%s?api_key=1248868d7003f60f2386595db98455ef&append_to_response=credits&language=en&sort_by=popularity.desc' % url
    else:
        link = 'https://api.themoviedb.org/3/person/%s/tv_credits?api_key=1248868d7003f60f2386595db98455ef&append_to_response=credits&language=en&sort_by=popularity.desc' % url
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    html = requests.get(link, headers=headers).json()
    if tv_mode == 'movie':
        url_g = domain_s + 'api.themoviedb.org/3/genre/movie/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    else:
        url_g = domain_s + 'api.themoviedb.org/3/genre/tv/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    html_g = requests.get(url_g, headers=headers).json()
    if tv_mode == 'movie':
        test = html['credits']['cast']
        mode = 4
    else:
        test = html['cast']
        mode = 7
    # Performance fix: the genre id->name table is loop-invariant; the
    # original rebuilt it for every credit.
    genres_list = dict([(i['id'], i['name']) for i in html_g['genres'] if i['name'] is not None])
    for items in test:
        icon = items['poster_path']
        fanart = items['backdrop_path']
        if icon == None:
            icon = ' '
        else:
            icon = domain_s + 'image.tmdb.org/t/p/original/' + icon
        if fanart == None:
            fanart = ' '
        else:
            fanart = domain_s + 'image.tmdb.org/t/p/original/' + fanart
        plot = items['overview']
        if tv_mode == 'movie':
            original_title = items['original_title']
        else:
            original_title = items['original_name']
        id = items['id']
        rating = items['vote_average']
        if tv_mode == 'movie':
            title = items['title']
        else:
            title = items['name']
        if 'first_air_date' in items:
            if items['first_air_date'] == None:
                year = ' '
            else:
                year = str(items['first_air_date'].split("-")[0])
        else:
            if 'release_date' in items:
                if items['release_date'] == None:
                    year = ' '
                else:
                    year = str(items['release_date'].split("-")[0])
            else:
                year = ' '
        # Robustness: skip genre ids missing from the table (the original
        # raised KeyError on ids not present in the genre list).
        genere = u' / '.join([genres_list[x] for x in items['genre_ids'] if x in genres_list])
        video_data = {}
        video_data['title'] = title
        video_data['poster'] = fanart
        video_data['plot'] = plot
        video_data['icon'] = icon
        video_data['genre'] = genere
        video_data['rating'] = rating
        video_data['year'] = year
        addDir3(title, 'www', mode, icon, fanart, plot, data=year, original_title=original_title, id=str(id), rating=rating, heb_name=title, show_original_year=year, isr=' ', generes=genere, video_info=video_data)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_SORT_TITLE)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_YEAR)
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_VIDEO_RATING)
def search_actor():
    """Prompt for a name and list matching people from TMDb person search.

    Each result becomes a folder entry (mode 73).  If the keyboard is
    cancelled the query is empty — kept from the original behaviour.
    """
    search_entered = ''
    keyboard = xbmc.Keyboard(search_entered, 'Enter Search')
    keyboard.doModal()
    if keyboard.isConfirmed():
        search_entered = keyboard.getText()
    link = 'https://api.themoviedb.org/3/search/person?api_key=1248868d7003f60f2386595db98455ef&language=en&query=%s&page=1&include_adult=false' % search_entered
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    html = requests.get(link, headers=headers).json()
    for items in html['results']:
        icon = items['profile_path']
        # Robustness: 'known_for' can be an empty list — the original raised
        # IndexError; fall back to the blank-art path instead.
        if items.get('known_for'):
            fanart = items['known_for'][0].get('backdrop_path')
        else:
            fanart = None
        if icon == None:
            icon = ' '
        else:
            icon = domain_s + 'image.tmdb.org/t/p/original/' + icon
        if fanart == None:
            fanart = ' '
        else:
            fanart = domain_s + 'image.tmdb.org/t/p/original/' + fanart
        addDir3(items['name'], str(items['id']), 73, icon, fanart, items['name'])
def fix_links(all_f_links, iconimage, image, plot, show_original_year, season, episode):
    """Flatten the grouped scraper results into one ordered source list.

    *all_f_links* maps a provider name to ``{'links': [(name, link, server,
    quality), ...], 'rd': bool}``; the special key ``'subs'`` is skipped.
    Every link becomes an 11-tuple::

        (display_name, link, iconimage, image, plot, show_original_year,
         quality, '-provider-', fixed_quality, raw_name, pre)

    where ``fixed_quality`` comes from ``fix_q()`` and ``pre`` is the string
    ``'0'`` (subtitle-match percentage placeholder, filled in elsewhere).

    When favourite servers are enabled (separate settings for movies and TV
    shows - a real *season* value selects the TV settings), links from a
    favourite provider are placed first; both partitions are sorted by
    ``fixed_quality`` ascending.

    On any error the exception is logged and whatever was collected so far
    is returned.  (Dead locals ``count_r``/``all_rd_s``/``all_rd_servers``/
    ``check`` and the ``if 1:`` scaffolding of the original were removed.)
    """
    all_data = []
    try:
        # A real season (not None and not the '%20' placeholder) means we are
        # resolving a TV episode, which uses its own favourite-server settings.
        if season != None and season != "%20":
            tv_movie = 'tv'
        else:
            tv_movie = 'movie'
        for name_f in all_f_links:
            if name_f != 'subs':
                for name, link, server, quality in all_f_links[name_f]['links']:
                    name = name.decode('utf-8', 'ignore').encode("utf-8")
                    fixed_q = fix_q(quality)
                    se = '-%s-' % name_f
                    pre = '0'
                    all_data.append((name_f + name + " - " + server, str(link), iconimage, image, plot, show_original_year, quality, se, fixed_q, name, pre))
        # Partition into favourite / non-favourite providers, favourites first,
        # each sorted by fixed quality (tuple index 8).
        if Addon.getSetting("fav_servers_en") == 'true' and tv_movie == 'movie':
            all_fv_servers = Addon.getSetting("fav_servers").split(',')
        elif Addon.getSetting("fav_servers_en_tv") == 'true' and tv_movie == 'tv':
            all_fv_servers = Addon.getSetting("fav_servers_tv").split(',')
        else:
            all_fv_servers = []
        all_fv = []
        all_rest = []
        for entry in all_data:
            # entry[7] is '-provider-'; strip the dashes before matching.
            if entry[7].replace('-', '') in all_fv_servers:
                all_fv.append(entry)
            else:
                all_rest.append(entry)
        all_fv = sorted(all_fv, key=lambda x: x[8], reverse=False)
        all_rest = sorted(all_rest, key=lambda x: x[8], reverse=False)
        all_data = all_fv + all_rest
    except Exception as e:
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        logging.warning('ERROR IN Fixlinks:' + str(lineno))
        logging.warning('inline:' + line)
        logging.warning('Error:' + str(e))
    return all_data
class selection_time(pyxbmct.AddonDialogWindow):
    """Small dialog asking whether to resume playback or play from the start.

    The caller reads the chosen row through get_selection(): -1 means the
    dialog was dismissed without a choice, 0 is the resume item passed in,
    1 is 'Play from start'.
    """

    def __init__(self, title='', item=''):
        super(selection_time, self).__init__(title)
        # Row 0 is the caller-supplied resume label, row 1 restarts playback.
        self.item = [item, 'Play from start']
        self.list_index = -1
        self.setGeometry(350, 150, 1, 1, pos_x=700, pos_y=200)
        self.set_active_controls()
        self.set_navigation()
        # Backspace dismisses the dialog without making a selection.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)

    def get_selection(self):
        """Return the picked row index (-1 if nothing was picked)."""
        return self.list_index

    def click_list(self):
        # Remember which row is highlighted, then dismiss the dialog.
        self.list_index = self.list.getSelectedPosition()
        self.close()

    def set_active_controls(self):
        # A single list control fills the whole dialog.
        self.list = pyxbmct.List()
        self.placeControl(self.list, 0, 0, rowspan=2, columnspan=1)
        self.list.addItems(self.item)
        # Selecting an item records it and closes the window.
        self.connect(self.list, self.click_list)

    def set_navigation(self):
        self.setFocus(self.list)

    def setAnimation(self, control):
        # Quick fade in/out for every control in the window.
        control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=50',),
                               ('WindowClose', 'effect=fade start=100 end=0 time=50',)])
class whats_new(pyxbmct.AddonDialogWindow):
    """Changelog-style popup: an image on the left, scrolling text on the
    right, and a single Close button."""

    def __init__(self, title='', img=' ', txt=''):
        super(whats_new, self).__init__(title)
        self.img = img
        self.txt = txt
        self.setGeometry(1000, 600, 4, 4)
        self.set_info_controls()
        self.set_active_controls()
        self.set_navigation()
        # Backspace closes the window as well.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)

    def set_info_controls(self):
        # Artwork occupies the left half of the dialog.
        self.image = pyxbmct.Image( self.img)
        self.placeControl(self.image, 0, 0, 3, 2)
        # Scrolling text box on the right half.
        self.textbox = pyxbmct.TextBox(font='Med')
        self.placeControl(self.textbox, 0, 2, 4, 2)
        self.textbox.setText(self.txt)
        # Auto-scroll long changelog text.
        self.textbox.autoScroll(3000, 3000, 3000)

    def click_c(self):
        self.close()

    def set_active_controls(self):
        # Close button in the bottom-left cell.
        self.button = pyxbmct.Button('Close')
        self.placeControl(self.button, 3, 0)
        self.connect(self.button, self.click_c)

    def set_navigation(self):
        # The button is the only focusable control; up/down stay on it.
        self.button.controlUp(self.button)
        self.button.controlDown(self.button)
        self.setFocus(self.button)

    def setAnimation(self, control):
        # Fade all controls in and out with the window.
        control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=500',),
                               ('WindowClose', 'effect=fade start=100 end=0 time=500',)])
def download_subs(f_list,index):
    """Download one subtitle from OpenSubtitles and attach it to the player.

    f_list holds subtitle records; per the calls below, entry [2] is the
    OpenSubtitles file id (passed to DownloadSubtitles) and entry [3] a
    language code.  The payload is base64+gzip decoded, optionally
    re-encoded to UTF-8 for legacy code-page languages, written to
    special://temp/TemporarySubs.<lang>.srt and handed to xbmc.Player().

    Returns the string 'ok' on success, or the exception object on failure.
    """
    try:
        logging.warning(f_list[index][2])
        logging.warning(f_list[index][3])
        # Python 2-only modules (xmlrpclib, StringIO); imported lazily.
        import xmlrpclib,codecs,base64,gzip,StringIO
        # Language code -> legacy Windows code page used for re-encoding.
        codePageDict = {'ara': 'cp1256', 'ar': 'cp1256', 'ell': 'cp1253', 'el': 'cp1253', 'heb': 'cp1255', 'he': 'cp1255', 'tur': 'cp1254', 'tr': 'cp1254', 'rus': 'cp1251', 'ru': 'cp1251'}
        server = xmlrpclib.Server('http://api.opensubtitles.org/xml-rpc', verbose=0)
        # Anonymous login; the token authorises the download call below.
        token = server.LogIn('', '', 'en', 'XBMC_Subtitles_v1')['token']
        content = [f_list[index][2],]
        content = server.DownloadSubtitles(token, content)
        # The subtitle body arrives base64-encoded and gzip-compressed.
        content = base64.b64decode(content['data'][0]['data'])
        content = gzip.GzipFile(fileobj=StringIO.StringIO(content)).read()
        try: lang = xbmc.convertLanguage(f_list[index][3], xbmc.ISO_639_1)
        except: lang = f_list[index]['SubLanguageID']
        subtitle = xbmc.translatePath('special://temp/')
        subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang)
        logging.warning(subtitle)
        codepage = codePageDict.get(lang, '')
        if codepage and Addon.getSetting('subtitles.utf') == 'true':
            try:
                # Best effort: convert from the legacy code page to UTF-8;
                # on failure the raw bytes are written unchanged.
                content_encoded = codecs.decode(content, codepage)
                content = codecs.encode(content_encoded, 'utf-8')
            except:
                pass
        file = open(subtitle, 'w')
        file.write(str(content))
        file.close()
        xbmc.sleep(1000)
        xbmc.Player().setSubtitles(subtitle)
        return 'ok'
    except Exception as e:
        logging.warning(e)
        return e
class MySubs(pyxbmct.AddonDialogWindow):
    """Subtitle-selection dialog shown while playback starts.

    A background thread counts down (window auto-closes via the global
    list_index=999 when time runs out); clicking a row downloads that
    subtitle through download_subs().  Geometry comes from the add-on's
    subs_* settings.  Communicates its result through the module-level
    global ``list_index``.
    """
    def __init__(self, title='',list=[],f_list=[]):
        # NOTE(review): mutable default arguments ([]) are shared between
        # calls - harmless here because they are only read, but fragile.
        super(MySubs, self).__init__(title)
        self.list_o=list
        self.title=title
        try:
            self.start_time= xbmc.Player().getTime()
        except:
            self.start_time=0
        # Window geometry is user-configurable via add-on settings.
        wd=int(Addon.getSetting("subs_width"))
        hd=int(Addon.getSetting("subs_hight"))
        px=int(Addon.getSetting("subs_px"))
        py=int(Addon.getSetting("subs_py"))
        self.full_list=f_list
        self.setGeometry(wd, hd, 9, 1,pos_x=px, pos_y=py)
        self.time_c=0
        self.set_info_controls()
        self.set_active_controls()
        self.set_navigation()
        # Connect a key action (Backspace) to close the window.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
        Thread(target=self.background_task).start()
    def background_task(self):
        """Countdown loop: updates the timer label every second, optionally
        auto-downloads the first subtitle once playback starts, and closes
        the window (setting list_index=999) when the countdown expires."""
        global list_index
        max=int(Addon.getSetting("subs_window"))+self.start_time
        self.t=self.start_time
        self.t2=self.start_time
        once=0
        while(self.t2<max):
            if Addon.getSetting("auto_subtitles")=='true' and xbmc.Player().isPlaying() and once==0:
                once=1
                self.label_info.setLabel('Downloading')
                result=download_subs(self.list_o,0)
                if result=='ok':
                    self.label_info.setLabel('Ready')
                else:
                    self.label_info.setLabel('Error: '+str(result))
            self.label.setLabel(str(int(max-self.t2)))
            self.time_c=self.t2
            try:
                # Track the real playback position; fall back to our own tick.
                self.t2= xbmc.Player().getTime()
            except:
                self.t2=self.t
            self.t+=1
            xbmc.sleep(1000)
        list_index=999
        self.close()
    def set_info_controls(self):
        # Label
        self.label = pyxbmct.Label(str(int(self.time_c)))
        self.placeControl(self.label, 4, 0, 3, 1)
        self.label_info = pyxbmct.Label('Waiting for your Selection')
        self.placeControl(self.label_info, 0, 0, 1, 1)
    def click_list(self):
        """Download the clicked subtitle; the window stays open so the user
        can try another row if the download fails."""
        global list_index
        list_index=self.list.getSelectedPosition()
        self.t=self.start_time
        self.label_info.setLabel('Downloading')
        result=download_subs(self.list_o,list_index)
        if result=='ok':
            self.label_info.setLabel('Ready')
        else:
            self.label_info.setLabel('Error: '+str(result))
        self.t=self.start_time
        #self.close()
    def click_c(self):
        """Close button handler: 888 signals 'cancelled' to the caller."""
        global list_index
        list_index=888
        current_list_item=''
        self.close()
    def set_active_controls(self):
        # List
        self.list = pyxbmct.List()
        self.placeControl(self.list, 1, 0, 7, 1)
        # Add items to the list
        items = self.list_o
        n_items=[]
        logging.warning('len(n_items)')
        logging.warning(len(n_items))
        # Each record is (match-percent, label, index, language); percentage
        # is shown in gold when non-zero, language always in light green.
        for pre,it,index_in,lan in items:
            logging.warning(pre)
            if pre==0:
                n_items.append('[COLOR lightgreen] [%s] [/COLOR]'%lan+it)
            else:
                n_items.append('[COLOR gold]'+str(pre)+'%[/COLOR]'+'[COLOR lightgreen] [%s] [/COLOR]'%lan+it)
        self.list.addItems(n_items)
        # Connect the list to a function to display which list item is selected.
        self.connect(self.list, self.click_list)
        # Connect key and mouse events for list navigation feedback.
        self.button = pyxbmct.Button('Close')
        self.placeControl(self.button, 8, 0)
        # Connect control to close the window.
        self.connect(self.button, self.click_c)
    def set_navigation(self):
        # Set navigation between controls
        self.list.controlDown(self.button)
        self.button.controlUp(self.list)
        # Set initial focus
        self.setFocus(self.list)
    # NOTE(review): slider_update/radio_update/list_update reference
    # self.slider, self.radiobutton and self.list_item_label, none of which
    # are created in this class - they look like leftovers from the pyxbmct
    # example and would raise AttributeError if ever wired to events.
    def slider_update(self):
        # Update slider value label when the slider nib moves
        try:
            if self.getFocus() == self.slider:
                self.slider_value.setLabel('{:.1F}'.format(self.slider.getPercent()))
        except (RuntimeError, SystemError):
            pass
    def radio_update(self):
        # Update radiobutton caption on toggle
        if self.radiobutton.isSelected():
            self.radiobutton.setLabel('On')
        else:
            self.radiobutton.setLabel('Off')
    def list_update(self):
        # Update list_item label when navigating through the list.
        try:
            if self.getFocus() == self.list:
                self.list_item_label.setLabel(self.list.getListItem(self.list.getSelectedPosition()).getLabel())
            else:
                self.list_item_label.setLabel('')
        except (RuntimeError, SystemError):
            pass
    def setAnimation(self, control):
        # Set fade animation for all add-on window controls
        control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=100',),
                               ('WindowClose', 'effect=fade start=100 end=0 time=100',)])
class MyAddon(pyxbmct.AddonDialogWindow):
    """Countdown dialog listing playable links (used e.g. for 'Next episode').

    Rows are strings of the form 'label$$$$$$$link'.  A background thread
    counts down and closes the window with the global list_index=999 on
    timeout; clicking a row stores its index in list_index, the Close
    button stores 888.  Geometry comes from the width/hight/px/py settings.
    """
    def __init__(self, title='',list=[],time_c=10,img=' ',txt=''):
        # NOTE(review): mutable default argument ([]) is shared between
        # calls - only read here, but fragile.
        super(MyAddon, self).__init__(title)
        self.list_o=list
        self.title=title
        # Window geometry is user-configurable via add-on settings.
        wd=int(Addon.getSetting("width"))
        hd=int(Addon.getSetting("hight"))
        px=int(Addon.getSetting("px"))
        py=int(Addon.getSetting("py"))
        self.setGeometry(wd, hd, 9, 1,pos_x=px, pos_y=py)
        self.time_c=time_c
        self.img=img
        self.txt=txt
        self.set_info_controls()
        self.set_active_controls()
        self.set_navigation()
        # Connect a key action (Backspace) to close the window.
        self.connect(pyxbmct.ACTION_NAV_BACK, self.close)
        Thread(target=self.background_task).start()
    def background_task(self):
        """Countdown in tenths of a second; for 'Next episode' the remaining
        playback time drives the counter.  Aborts early when certain Kodi
        dialogs (ids 10153/10101) appear; on expiry sets list_index=999."""
        global list_index
        t=int(self.time_c)*10
        while(t>30):
            xbmc.sleep(100)
            self.label.setLabel(str(int(t)/10))
            windowsid=xbmcgui.getCurrentWindowDialogId()
            if windowsid==10153 or windowsid==10101:
                break
            if 'Next episode' in self.title:
                try:
                    t=(xbmc.Player().getTotalTime()-xbmc.Player().getTime())*10
                except:
                    t=0
                    pass
            else:
                t-=1
        list_index=999
        self.close()
    def set_info_controls(self):
        # Label
        self.label = pyxbmct.Label(str(int(self.time_c)))
        self.placeControl(self.label, 4, 0, 3, 1)
        self.image = pyxbmct.Image( self.img)
        self.placeControl(self.image, 0, 0, 2, 1)
        self.textbox = pyxbmct.TextBox()
        self.placeControl(self.textbox, 2,0, 2, 1)
        self.textbox.setText(self.txt)
        # Set auto-scrolling for long TexBox contents
        self.textbox.autoScroll(1000, 1000, 1000)
    def click_list(self):
        """Row click: publish the selected index via the global and close."""
        global list_index
        list_index=self.list.getSelectedPosition()
        self.close()
    def click_c(self):
        """Close button: 888 signals 'cancelled' to the caller."""
        global list_index
        list_index=888
        current_list_item=''
        self.close()
    def set_active_controls(self):
        # List
        self.list = pyxbmct.List()
        self.placeControl(self.list, 4, 0, 4, 1)
        # Add items to the list
        items = self.list_o
        n_items=[]
        a_links=[]
        # Split 'label$$$$$$$link' rows; only labels are displayed.
        for it in items:
            n_items.append(it.split('$$$$$$$')[0])
            a_links.append(it.split('$$$$$$$')[1])
        self.list.addItems(n_items)
        # Connect the list to a function to display which list item is selected.
        self.connect(self.list, self.click_list)
        # Connect key and mouse events for list navigation feedback.
        self.button = pyxbmct.Button('Close')
        self.placeControl(self.button, 8, 0)
        # Connect control to close the window.
        self.connect(self.button, self.click_c)
    def set_navigation(self):
        # Set navigation between controls
        self.list.controlDown(self.button)
        self.button.controlUp(self.list)
        # Set initial focus
        self.setFocus(self.list)
    # NOTE(review): slider_update/radio_update/list_update reference controls
    # (self.slider, self.radiobutton, self.list_item_label) that are never
    # created here - apparent leftovers from the pyxbmct example code.
    def slider_update(self):
        # Update slider value label when the slider nib moves
        try:
            if self.getFocus() == self.slider:
                self.slider_value.setLabel('{:.1F}'.format(self.slider.getPercent()))
        except (RuntimeError, SystemError):
            pass
    def radio_update(self):
        # Update radiobutton caption on toggle
        if self.radiobutton.isSelected():
            self.radiobutton.setLabel('On')
        else:
            self.radiobutton.setLabel('Off')
    def list_update(self):
        # Update list_item label when navigating through the list.
        try:
            if self.getFocus() == self.list:
                self.list_item_label.setLabel(self.list.getListItem(self.list.getSelectedPosition()).getLabel())
            else:
                self.list_item_label.setLabel('')
        except (RuntimeError, SystemError):
            pass
    def setAnimation(self, control):
        # Set fade animation for all add-on window controls
        control.setAnimations([('WindowOpen', 'effect=fade start=0 end=100 time=500',),
                               ('WindowClose', 'effect=fade start=100 end=0 time=500',)])
def nextup():
    """Offer and launch the next episode when the current one nears its end.

    Reads the pending episode row from the add-on's ``nextup`` DB table,
    scrapes sources for it (via the cached c_get_sources), shows the
    MyAddon countdown dialog with the link list, and - unless the user
    cancelled - navigates Kodi to play the chosen (or first) link.
    Communicates with the dialog through the global ``list_index``
    (999 = timeout, 888 = cancel).  Returns '0'/0; errors are logged and
    surfaced as a notification.
    """
    try:
        global list_index,done_nextup,all_s_in,done1
        list=[]
        time_to_save=int(Addon.getSetting("save_time"))
        dbcur.execute("SELECT * FROM nextup")
        match = dbcur.fetchone()
        fast_link=' '
        if match!=None:
            # One row describing the episode queued for auto-play.
            name,url,icon,image,plot,year,season,episode,original_title,heb_name,show_original_year,eng_name,isr,id=match
            name=str(name)
            url=str(url)
            icon=str(icon)
            image=str(image)
            plot=str(plot).replace('%27',"'")
            year=str(year)
            season=str(season)
            episode=str(episode)
            original_title=str(original_title)
            heb_name=str(heb_name.decode('utf-8')).replace('%27',"'")
            show_original_year=str(show_original_year)
            eng_name=str(eng_name).replace('%27',"'")
            isr=str(isr)
            id=str(id)
            iconimage=icon
            fanart=image
            data=year
            description=plot.replace('-Episode ','').replace('-NEXTUP-','').encode('utf8')
            # Favourite-server search is active only when all TV fav settings are on.
            fav_search_f=Addon.getSetting("fav_search_f_tv")
            fav_servers_en=Addon.getSetting("fav_servers_en_tv")
            fav_servers=Addon.getSetting("fav_servers_tv")
            if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0 ):
                fav_status='true'
            else:
                fav_status='false'
            if debug_mode==True:
                logging.warning('nextup sources')
            f_subs=[]
            # Scrape (cached) sources for the upcoming episode.
            match_a,all_links_fp,all_pre,f_subs= cache.get(c_get_sources, time_to_save, original_title,year,original_title,season,str(int(episode)),id,eng_name,show_original_year,heb_name,isr,False,fav_status,'no','0', table='pages')
            all_s_in=( {},100 ,'',4,'')
            all_data=fix_links(match_a,iconimage,fanart,description,show_original_year,season,episode)
            from tmdb import get_episode_data
            name_n,plot_n,image_n=get_episode_data(id,season,str(int(episode)))
            # Dialog rows are 'label$$$$$$$link'.
            for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre in all_data:
                list.append('[COLOR gold]'+q+'[/COLOR][COLOR lightblue]'+server+'[/COLOR]-'+name+'$$$$$$$'+link)
            fast_link=list[0].split('$$$$$$$')[1]
            try:
                time_left=xbmc.Player().getTotalTime()-xbmc.Player().getTime()
            except:
                time_left=30
                pass
            # Show the countdown dialog; it sets the global list_index.
            window = MyAddon('Next Episode - '+name_n ,list,time_left,image_n,plot_n)
            window.doModal()
            del window
            play_now=False
            playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            playlist.clear()
            result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Playlist.Clear", "params": { "playlistid": 0 }, "id": 1}')
            if list_index!=999 and list_index!=888:
                xbmc.Player().stop()
                fast_link=list[list_index].split('$$$$$$$')[1]
            #xbmc.executebuiltin(('XBMC.PlayMedia("plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=' '&mode=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s",return)'%(data,urllib.quote_plus(description),eng_name,str(int(episode)+1),urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,name,original_title,season,show_original_year,urllib.quote_plus(fast_link),urllib.quote_plus(fast_link))).replace('EMPTY','%20'))
            if Addon.getSetting('play_nextup_wait')=='false' and list_index==999:
                return '0'
            if list_index==888:
                return '0'
            if fast_link!=' ':
                n_fast_link=fast_link
                if Addon.getSetting("fast_play2_tv")=='true':
                    # '999' tells the player path the countdown expired.
                    if list_index==999:
                        n_fast_link='999'
                    else:
                        n_fast_link=fast_link
                #
                #xbmc.executebuiltin(('XBMC.PlayMedia("plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=' '&mode=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s",return)'%(data,urllib.quote_plus(description),eng_name,str(int(episode)+1),urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,name,original_title,season,show_original_year,urllib.quote_plus(url),urllib.quote_plus(fast_link))).replace('EMPTY','%20'))
                #if Addon.getSetting("fast_play2_tv")=='true':
                result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Playlist.Clear", "params": { "playlistid": 0 }, "id": 1}')
                xbmc.Player().stop()
                done_nextup=0
                #
                # Kodi 17+ uses Container.update; older versions ActivateWindow.
                KODIV = float(xbmc.getInfoLabel("System.BuildVersion")[:4])
                if KODIV >= 17:
                    logging.warning('PLAY NEXTUP')
                    xbmc.executebuiltin(('Container.update("plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s-NEXTUP-&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=%s' '&mode2=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s&fav_status=%s",return)'%(data,urllib.quote_plus(description),eng_name,str(int(episode)),urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,isr,name,original_title,season,show_original_year,urllib.quote_plus(n_fast_link),urllib.quote_plus(n_fast_link),fav_status)).replace('EMPTY','%20'))
                    #play(name,fast_link,iconimage,image,description,data,season,episode,original_title,name,heb_name,show_original_year,eng_name,isr,original_title,id,windows_play=True,auto_fast=False,nextup=True)
                    logging.warning('PLAY NEXTUP FULLSCREEN')
                    xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
                    return '0'
                else:
                    xbmc.executebuiltin(('ActivateWindow(10025,"plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s-NEXTUP-&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=%s' '&mode2=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s&fav_status=%s",return)'%(data,urllib.quote_plus(description),eng_name,str(int(episode)),urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,isr,name,original_title,season,show_original_year,urllib.quote_plus(n_fast_link),urllib.quote_plus(n_fast_link),fav_status)).replace('EMPTY','%20'))
                    mode2=1999
            #sys.exit()
        '''
        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        playlist.clear()
        for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre in all_data:
            listItem=xbmcgui.ListItem(name, iconImage=icon, thumbnailImage=image)
            listItem.setInfo('video', {'Title': name, 'Genre': 'Kids'})
            link2=(('plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=' '&mode2=4&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s'%(data,urllib.quote_plus(description),eng_name,str(int(episode)+1),urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,name,original_title,season,show_original_year,urllib.quote_plus(link),urllib.quote_plus(link))).replace('EMPTY','%20'))
            logging.warning(link2)
            playlist.add(url=link2, listitem=listItem)
        play_now=True
        '''
    except Exception as e:
        import linecache
        exc_type, exc_obj, tb = sys.exc_info()
        f = tb.tb_frame
        lineno = tb.tb_lineno
        filename = f.f_code.co_filename
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        logging.warning('ERROR IN NEXTUP IN:'+str(lineno))
        logging.warning('inline:'+line)
        logging.warning('Error:'+str(e))
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Error', 'inLine:'+str(lineno))).encode('utf-8'))
    done_nextup=1
    marked_trk=1
    return 0
def prepare_library(dbcon_kodi,dbcur_kodi):
    """Prepare Kodi's video database for add-on library inserts.

    Computes the next free ids for the movie, files and art tables, builds
    a {genre name: genre id} lookup from the genre table, and makes sure
    the add-on's plugin:// source row (id 99879) exists in the path table.

    Returns the tuple (movie_id, file_id, art_id, genre_map).
    """
    def _next_id(query):
        # MAX(...) yields a single NULL on an empty table; adding 1 to None
        # raises, which we treat as "start from 0".
        dbcur_kodi.execute(query)
        row = dbcur_kodi.fetchone()
        try:
            return row[0] + 1
        except:
            return 0
    index = _next_id("SELECT MAX(idFile) FROM movie")
    file_index = _next_id("SELECT MAX(idFile) FROM files")
    art_index = _next_id("SELECT MAX(art_id) FROM art")
    # Genre lookup: name -> id.
    dbcur_kodi.execute("SELECT * FROM genre")
    all_gen = {}
    for g_id, nm in dbcur_kodi.fetchall():
        all_gen[nm] = g_id
    # Insert the add-on's source path once; rows are matched on column 0.
    dbcur_kodi.execute("SELECT * FROM path")
    have_path = 0
    for row in dbcur_kodi.fetchall():
        if row[0] == 99879:
            have_path = 1
    if have_path == 0:
        dbcur_kodi.execute("INSERT INTO path Values ('99879', 'plugin://plugin.video.destinyds/', '', '', '', '','', '', '', '', '', '');")
        dbcon_kodi.commit()
    return index,file_index,art_index,all_gen
def add_item_to_lib(name,url,mode,icon,fan,plot,year,original_name,id,rating,new_name,isr,genere,trailer,fav_status,index,file_index,art_index,dbcon_kodi,dbcur_kodi,all_gen):
    """Insert one movie into Kodi's video DB so it shows in the native library.

    Writes a row into ``movie`` (id *index*) and ``files`` (id *file_index*,
    pointing at this add-on's plugin:// URL built by get_rest_data), two
    ``art`` rows (poster + fanart) at *art_index* / *art_index*+1, and one
    ``genre_link`` row per genre name found in *all_gen*.  Caller is
    responsible for advancing the id counters and committing.

    NOTE(review): SQL is built with % string formatting; titles/plots are
    only protected by doubling single quotes.  Parameterized queries
    (``execute(sql, params)``) would be safer for arbitrary metadata.
    """
    # Kodi expects a list of <thumb> XML fragments for the poster art.
    icon_db='<thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb><thumb aspect="poster" preview="img_poster">img_poster</thumb>'
    icon_db=icon_db.replace('img_poster',icon)
    # Matching <fanart> XML fragment list.
    fanart_db='<fanart><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb><thumb preview="img_poster">img_poster</thumb></fanart>'
    fanart_db=fanart_db.replace('img_poster',fan)
    # plugin:// URL that replays this item through the add-on (mode 4 play).
    link=get_rest_data(name,url,mode,icon,fan,plot,data=year,original_title=original_name,id=id,rating=rating,heb_name=new_name,show_original_year=year,isr=isr,generes=genere,trailer=trailer,fav_status=fav_status).replace(' ','%20')
    dbcur_kodi.execute("INSERT INTO movie Values ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s');" % (index,file_index,name.replace("'","''"),plot.replace("'","''"),'','',None,'99879','',None,icon_db,'99879','','','','0',genere,'',original_name.replace("'","''"),'','',trailer,fanart_db,'',url,'4',None,None,year))
    dbcur_kodi.execute("INSERT INTO files Values ('%s', '%s', '%s', '%s', '%s', '%s');" % (file_index,'99879',link.replace("'","''"),'','',''))
    dbcur_kodi.execute("INSERT INTO art Values ('%s', '%s', '%s', '%s', '%s');" % (art_index,index,'movie','fanart',fan))
    dbcur_kodi.execute("INSERT INTO art Values ('%s', '%s', '%s', '%s', '%s');" % (art_index+1,index,'movie','poster',icon))
    # Link every known genre of this movie ('A / B' separated list).
    for items in genere.split('/'):
        if items.strip() in all_gen:
            dbcur_kodi.execute("INSERT INTO genre_link Values ('%s', '%s', '%s');" % (all_gen[items.strip()],index,'movie'))
    '''
    index+=1
    file_index+=1
    art_index+=2
    '''
def remove_color(name):
    """Strip existing [COLOR] tags from a source label and re-colourise it.

    The first [bracketed] chunk turns green, the first {braced} chunk (the
    provider) turns blue and moves to the front, a numeric size token in a
    dash-separated segment turns coral and moves to the front, and - only
    when no size token exists - the last dash-segment longer than one
    character turns gold.  Returns the recoloured label.
    """
    # Drop every pre-existing colour tag.
    for tag in re.findall('COLOR (.+?)\]', name):
        name = name.replace('[COLOR %s]' % tag, '').replace('[/COLOR]', '')
    # First [bracketed] chunk becomes green; collapse the doubled brackets
    # produced by wrapping text that was already inside [].
    bracketed = re.findall('\[(.+?)\]', name)
    if bracketed:
        name = name.replace(bracketed[0], '[COLOR green]' + bracketed[0] + '[/COLOR]')
        name = name.replace('[[', '[').replace(']]', ']')
    # First {braced} chunk (provider) becomes blue and moves to the front.
    braced = re.findall('\{(.+?)\}', name)
    if braced:
        name = name.replace('{' + braced[0] + '}', '')
        name = '{' + braced[0] + '}' + name
        name = name.replace(braced[0], '[COLOR blue]' + braced[0] + '[/COLOR]')
    # Scan dash-separated segments: collect numeric size tokens (e.g. 1.4GB)
    # and remember the last segment longer than one character as a fallback.
    size_tokens = []
    last_long_segment = ''
    for segment in name.split('-'):
        try:
            float(segment.replace('GB', '').split('{')[0])
            size_tokens.append(segment.split('{')[0])
        except:
            pass
        if len(segment) > 1:
            last_long_segment = segment
    if not size_tokens:
        # No size token anywhere: paint the fallback segment gold.
        name = name.replace(last_long_segment, '[COLOR gold]' + last_long_segment + '[/COLOR]')
    else:
        # Move the first size token to the front and paint it coral.
        name = name.replace(size_tokens[0], '')
        name = size_tokens[0] + '-' + name
        name = name.replace(size_tokens[0], '[COLOR coral]' + size_tokens[0] + '[/COLOR]')
    return name
def new_show_sources(m,data,description,eng_name,episode,image,heb_name,iconimage,id,prev_name,original_title,season,show_original_year,n,rest_data,n_magnet,r_results,len_all_torrent_s,next_ep,count_heb,only_torrent,isr):
global stop_try_play,done1
original_data=[]
original_data.append((m,data,description,eng_name,episode,image,heb_name,iconimage,id,prev_name,original_title,season,show_original_year,n,rest_data,n_magnet,r_results,len_all_torrent_s,next_ep))
global list_index
list=[]
stop_try_play=False
menu=[]
all_links=[]
all_s_names=[]
all_plot=[]
all_server_name=[]
real_index=0
if len(r_results)>0:
list.append('[COLOR khaki][I]►►► RD Sources Only ◄◄◄[/I][/COLOR]'+'$$$$$$$'+r_results[0])
menu.append(['[COLOR khaki][I]►►► RD Sources Only ◄◄◄[/I][/COLOR]', '','','','','',r_results[0],''])
all_links.append(r_results[0])
all_s_names.append('RD Sources Only')
all_plot.append('RD Sources Only')
all_server_name.append('0')
real_index+=1
if len(next_ep)>0:
list.append('[COLOR khaki][I]►►► Open Next Episode ◄◄◄[/I][/COLOR]'+'$$$$$$$'+next_ep[0])
menu.append(['[COLOR khaki][I]►►► Open Next Episode ◄◄◄[/I][/COLOR]', '','','','','',next_ep[0],''])
all_links.append(next_ep[0])
all_s_names.append('Open Next Episode')
all_plot.append('Open Next Episode')
all_server_name.append('0')
real_index+=1
if len(n_magnet)>0:
list.append('[COLOR khaki][I]►►►(%s) Magnet Links ◄◄◄[/I][/COLOR]'%len_all_torrent_s+'$$$$$$$'+n_magnet[0])
menu.append(['[COLOR khaki][I]►►►(%s) Magnet Links ◄◄◄[/I][/COLOR]'%len_all_torrent_s, '','','','','',n_magnet[0],''])
all_links.append(n_magnet[0])
all_s_names.append('Magnet Links')
all_plot.append('Magnet Links')
all_server_name.append('0')
real_index+=1
if len(n)>0:
list.append('[COLOR lightgreen][I]►►► Rest of Results ◄◄◄[/I][/COLOR]'+'$$$$$$$'+n[0])
menu.append(['[COLOR lightgreen][I]►►► Rest of Results ◄◄◄[/I][/COLOR]', '','','','','',n[0],''])
all_links.append(n[0])
all_s_names.append('Rest of Results ')
all_plot.append('Rest of Results ')
all_server_name.append('0')
real_index+=1
if Addon.getSetting("auto_enable_new")== 'true' and Addon.getSetting("new_window_type2")=='3':
list.append('[COLOR khaki][I]►►►Auto Play◄◄◄[/I][/COLOR]')
menu.append(['[COLOR khaki][I]►►►Auto Play ◄◄◄[/I][/COLOR]', '','','','','','',''])
all_links.append('www')
all_s_names.append('Auto Play')
all_plot.append('-Auto Play-')
all_server_name.append('0')
real_index+=1
if allow_debrid:
rd_domains=cache.get(get_rd_servers, 72, table='pages')
else:
rd_domains=[]
list_of_play=[]
for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre,supplier,size in m:
if size=='0 GB' or '0.0 GB' in size:
size=' '
o_server=server
name=remove_color(name)
all_plot.append(plot)
all_server_name.append(server)
pre_n=''
if pre>0:
pre_n='[COLOR gold]'+str(pre)+'%[/COLOR]'
regex='\[COLOR green\](.+?)\[/COLOR\]'
m=re.compile(regex).findall(name)
server=''
supplay=supplier
if '-' in supplay:
supplay=supplay.split('-')[0]
if len(m)>0:
server=m[0]
name=name.replace('[COLOR green]%s[/COLOR]'%m[0],'')
server='[COLOR plum]'+server+'[/COLOR]'
regex='\[COLOR gold\](.+?)\[/COLOR\]'
m=re.compile(regex).findall(name)
if len(m)>0:
name=name.replace('[COLOR gold]%s[/COLOR]'%m[0],'')
regex='\[COLOR coral\](.+?)\[/COLOR\]'
if '1337' in name:
regex=' - (.+?) GB'
m=re.compile(regex).findall(name)
if len(m)>0:
size=m[0].replace('--','')+' GB'
name=name.replace(' - %s GB'%m[0],'')
else:
m=re.compile(regex).findall(name)
if len(m)>0:
size=m[0]
name=name.replace('[COLOR coral]%s[/COLOR]'%m[0],'')
regex='\{(.+?)\}'
m=re.compile(regex).findall(name)
if len(m)>0:
name=name.replace('{%s}'%m[0],'')
supplay=m[0]
if '2160' in q or '4k' in q.lower():
q='2160'
elif '1080' in q:
q='1080'
elif '720' in q:
q='720'
elif '480' in q:
q='480'
elif '360' in q:
q='360'
else:
q='unk'
rd=False
try:
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].lower()
except:
host='no'
if host in rd_domains or (('torrent' in server.lower() or 'torrent' in name.lower() or 'magnet' in name.lower() or 'magnet' in server.lower()) and allow_debrid):
rd=True
add_rd=''
if allow_debrid and 'magnet' in name:
rd=True
if rd:
add_rd='[COLOR gold]RD- [/COLOR]'
add_c=''
if 'Cached ' in name:
add_c='[COLOR gold] Cached [/COLOR]'
supplay=supplay.replace('P-0/','')
txt=add_c+'[COLOR lightseagreen]'+add_rd+name.replace('Cached ','').replace('-',' ').replace('%20',' ').strip().decode('utf-8','ignore')+'[/COLOR]\nServer: '+server+' Subs: '+str(pre_n)+' Quality:[COLOR gold] ◄'+q+'► [/COLOR]Provider: [COLOR lightblue]'+supplay.decode('utf-8','ignore')+'[/COLOR] Size:[COLOR coral]'+size+'[/COLOR]$$$$$$$'+link.decode('utf-8','ignore')
menu.append([name.replace('-',' ').replace('%20',' ').strip(), server,str(pre_n),q,supplay,size,link,rd])
list_of_play.append((name,link,icon,image,plot,year,season,episode,original_title,saved_name.encode('utf8'),heb_name,show_original_year,eng_name,'0',prev_name,id,supplay))
list.append(txt)
all_links.append(link)
all_s_names.append(saved_name)
#time_left=xbmc.Player().getTotalTime()-xbmc.Player().getTime()
if len(rest_data)>0:
thread=[]
time_to_save, original_title,year,original_title2,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local=rest_data[0]
thread.append(Thread(get_rest_s, time_to_save,original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local))
thread[0].start()
if Addon.getSetting("new_window_type2")=='1':
menu = ContextMenu('plugin.video.destinyds', menu,iconimage,image,description)
menu.doModal()
param = menu.params
del menu
if param==888:
logging.warning('END EXIT')
return 'END'
list_index=param
elif Addon.getSetting("new_window_type2")=='2':
menu = ContextMenu_new('plugin.video.destinyds', menu,iconimage,image,description)
menu.doModal()
param = menu.params
del menu
if param==888:
logging.warning('END EXIT')
return 'END'
list_index=param
elif Addon.getSetting("new_window_type2")=='3':
param=0
done1_1=3
global now_playing_server,playing_text,mag_start_time_new
xbmc.executebuiltin("Dialog.Close(busydialog)")
menu2 = ContextMenu_new2('plugin.video.destinyds', menu,iconimage,image,description)
menu2.show()
play_now=0
if Addon.getSetting("play_first")=='true':
if not allow_debrid:
for name, server,pre_n,q,supplay,size,link,rd in menu:
if "magnet" in server:
real_index+=1
elif len(server)>0:
break
if (real_index<len(all_s_names)) and only_torrent=='no':
play_now=1
xbmc.sleep(100)
'''
try:
xbmc.sleep(100)
play(all_s_names[real_index].encode('utf8'),all_links[real_index],iconimage,image,all_plot[real_index],data,season,episode,original_title,all_s_names[real_index].encode('utf8'),heb_name,show_original_year,eng_name,'0',prev_name,id,windows_play=True)
except:
pass
'''
while param!=888:
try:
param = menu2.params
except Exception as e:
logging.warning('Skin E:'+str(e))
param=7777
list_index=param
fast_link=' '
f_plot=' '
if (param!=7777 and param!=None and param!=666666) or play_now>0:
if play_now>0:
fast_link=all_links[real_index]
f_plot=all_plot[real_index]
list_index=real_index
else:
if list_index!=999 and list_index!=888:
fast_link=all_links[list_index]
f_plot=all_plot[list_index]
if list_index==888 or list_index==999:
logging.warning('Stop Play')
stop_try_play=True
return 'ok'
now_playing_server=all_server_name[list_index]+'$$$$'+str(list_index-real_index+1)+'/'+str(len(all_links))
if fast_link!=' ':
xbmc.Player().stop()
if 'plugin:' in fast_link:
#xbmc.executebuiltin('Container.update("%s")'%fast_link)
url,name,iconimage,mode,fanart,description,data,original_title,id,season,episode,tmdbid,eng_name,show_original_year,heb_name,isr,saved_name,prev_name,dates,data1,fast_link,fav_status,only_torrent,only_heb_servers,new_windows_only=undo_get_rest_data(fast_link)
menu2.close_now()
get_sources(name, url,iconimage,fanart,description+'-NEXTUP-',data,original_title,season,episode,id,eng_name,show_original_year,heb_name,str(isr),dates=dates,fav_status=fav_status)
break
else:
logging.warning('1')
if ('Auto Play' in all_s_names[list_index]) or play_now>0:
auto_fast=True
play_now=0
new_index=real_index
errors=[]
play_ok=0
while(1):
menu2.tick=60
menu2.auto_play=1
try:
menu2.tick=60
if xbmc.Player().isPlaying():
play_time=int(Addon.getSetting("play_full_time"))
count_p=0
while(1):
menu2.tick=60
vidtime = xbmc.Player().getTime()
try:
value_d=(vidtime-(int(float(mag_start_time_new))))
except:
value_d=vidtime
if value_d> (play_time/2) :
play_ok=1
#xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
logging.warning('Closing Played')
break
count_p+=1
if count_p>(play_time*2) :
logging.warning('Closing Not Played')
break
try:
param = menu2.params
except Exception as e:
logging.warning('Skin Error2:'+str(e))
if param==888:
logging.warning('Close:11')
logging.warning('CANCEL PLAY AUTO')
break
xbmc.sleep(500)
if play_ok>0:
logging.warning('Close:10')
break
f_new_link=all_links[new_index]
'''
if new_index<5:
f_new_link='www'
'''
now_playing_server=all_server_name[new_index]+'$$$$'+str(new_index-real_index+1)+'/'+str(len(all_links))
playing_text='Trying Next Link$$$$'+'0'
try:
param = menu2.params
except Exception as e:
logging.warning('Skin Error2:'+str(e))
if param==888:
logging.warning('Close:9')
logging.warning('CANCEL PLAY AUTO')
break
menu2.count_p=0
logging.warning('time')
if not allow_debrid and Addon.getSetting('auto_magnet_free')=='false':
if not "magnet" in all_server_name[new_index]:
play(name,f_new_link,iconimage,image,all_plot[new_index],data,season,episode,original_title,all_s_names[new_index].encode('utf8'),heb_name,show_original_year,eng_name,isr,prev_name,id,windows_play=False,auto_fast=auto_fast,auto_play=True)
else:
play(name,f_new_link,iconimage,image,all_plot[new_index],data,season,episode,original_title,all_s_names[new_index].encode('utf8'),heb_name,show_original_year,eng_name,isr,prev_name,id,windows_play=False,auto_fast=auto_fast,auto_play=True)
if (new_index>real_index):
errors.append(f_new_link+'\n'+all_plot[new_index])
logging.warning('Send Error:'+f_new_link)
new_index+=1
logging.warning('time2')
if new_index>=len(all_links):
logging.warning('Close:91')
#menu2.close_now()
break
playing_text='Playing Please Wait...$$$$'+'0'
xbmc.sleep(500)
if len(errors)>0:
sendy('\n'.join(errors),'Error Auto Des','DesAuto')
except Exception as e:
import linecache
new_index+=1
playing_text='Bad Link Moving on...$$$$'+'0'
if new_index>=len(all_links):
logging.warning('Close:92')
menu2.close_now()
break
playing_text='Bad Source...$$$$'+'0'
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
logging.warning('ERROR IN SKIN:'+str(e))
logging.warning('inline:'+line)
logging.warning(e)
pass
else:
auto_fast=False
counter_end=0
try:
while(1):
counter_end+=1
if counter_end>500:
menu2.close_now()
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Try Manual...'.decode('utf8'))).encode('utf-8'))
break
if fast_link!=' ' and (param!=7777 and param!=None and param!=666666):
menu2.params=666666
xbmc.Player().stop()
now_playing_server=all_server_name[param]+'$$$$'+str(param-real_index+1)+'/'+str(len(all_links))
if 'plugin:' in fast_link:
#xbmc.executebuiltin('Container.update("%s")'%fast_link)
url,name,iconimage,mode,fanart,description,data,original_title,id,season,episode,tmdbid,eng_name,show_original_year,heb_name,isr,saved_name,prev_name,dates,data1,fast_link,fav_status,only_torrent,only_heb_servers,new_windows_only=undo_get_rest_data(fast_link)
menu2.close_now()
get_sources(name, url,iconimage,fanart,description+'-NEXTUP-',data,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates=dates,fav_status=fav_status)
break
else:
try:
play(name,fast_link,iconimage,image,f_plot,data,season,episode,original_title,all_s_names[list_index].encode('utf8'),heb_name,show_original_year,eng_name,isr,prev_name,id,windows_play=False,auto_fast=auto_fast,auto_play=True)
except:
playing_text='Bad Link...$$$$'+'0'
pass
fast_link=''
play_ok=0
if xbmc.Player().isPlaying():
play_time=int(Addon.getSetting("play_full_time"))
count_p=0
while(1):
menu2.tick=60
vidtime = xbmc.Player().getTime()
try:
value_d=(vidtime-(int(float(mag_start_time_new))))
except:
value_d=vidtime
if value_d> (play_time/2) :
play_ok=1
#xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
logging.warning('Closing Played')
break
count_p+=1
if count_p>(play_time*2) :
logging.warning('Closing Not Played')
break
try:
param = menu2.params
except Exception as e:
logging.warning('Skin Error2:'+str(e))
if param==888:
logging.warning('Close:11')
logging.warning('CANCEL PLAY AUTO')
break
xbmc.sleep(500)
if play_ok>0:
logging.warning('Close:120')
break
if (param!=7777 and param!=None and param!=666666):
list_index=param-real_index
logging.warning('list_index: '+str(list_index))
if list_index!=999 and list_index!=888:
fast_link=all_links[list_index]
f_plot=all_plot[list_index]
if list_index==888 or list_index==999:
logging.warning('Stop Play')
stop_try_play=True
logging.warning('Break now: '+str(list_index))
break
try:
param = menu2.params
except Exception as e:
logging.warning('Skin E:'+str(e))
param=7777
if param==888 or param==999:
logging.warning('Stop Play')
stop_try_play=True
logging.warning('Break now2: '+str(list_index))
break
xbmc.sleep(500)
logging.warning('Tick: '+str(param))
except Exception as e:
import linecache
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
logging.warning('SKIN EEEE:'+str(e)+' At:'+str(lineno))
pass
menu2.played()
if param==888 or param==7777:
#menu2.close_now()
#xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
break
else:
xbmc.sleep(500)
counter=0
while 1:
alive=0
for thread in threading.enumerate():
if (thread.isAlive()):
alive=1
thread._Thread__stop()
if alive==0 or counter>10:
break
counter+=1
xbmc.sleep(200)
logging.warning('Del window')
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
done1=2
del menu2
else:
window = sources_window(original_title ,list,'0',image,description)
window.doModal()
del window
if Addon.getSetting("new_window_type2")!='3':
fast_link=' '
f_plot=' '
if list_index!=999 and list_index!=888:
fast_link=all_links[list_index]
f_plot=all_plot[list_index]
if list_index==888 or list_index==999:
logging.warning('Stop Play')
stop_try_play=True
url=''
playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
playlist.clear()
return 'OK'
logging.warning('list_index:'+str(list_index))
if fast_link!=' ':
xbmc.Player().stop()
if 'plugin:' in fast_link:
#xbmc.executebuiltin('Container.update("%s")'%fast_link)
url,name,iconimage,mode,fanart,description,data,original_title,id,season,episode,tmdbid,eng_name,show_original_year,heb_name,isr,saved_name,prev_name,dates,data1,fast_link,fav_status,only_torrent,only_heb_servers,new_windows_only=undo_get_rest_data(fast_link)
get_sources(name.replace('Cached ',''), url,iconimage,fanart,description+'-NEXTUP-',data,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates=dates,fav_status=fav_status)
else:
logging.warning(all_s_names[list_index])
play(name.replace('Cached ',''),fast_link,iconimage,image,f_plot,data,season,episode,original_title,all_s_names[list_index].encode('utf8'),heb_name,show_original_year,eng_name,'0',prev_name,id)
'''
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
played=False
while isbusy:
isbusy = xbmc.getCondVisibility('Window.IsActive(busydialog)')
xbmc.sleep(200)
if xbmc.Player().isPlaying():
xbmc.sleep(2000)
vidtime = xbmc.Player().getTime()
if vidtime > 0:
played=True
logging.warning('played')
logging.warning(played)
if not played:
m,data,description,eng_name,episode,image,heb_name,iconimage,id,prev_name,original_title,season,show_original_year,n,rest_data,n_magnet,only_heb,len_all_torrent_s,next_ep=original_data[0]
new_show_sources(m,data,description,eng_name,episode,image,heb_name,iconimage,id,prev_name,original_title,season,show_original_year,n,rest_data,n_magnet,only_heb,len_all_torrent_s,next_ep)
xbmc.sleep(200)
'''
return 'END'
return 'ok'
def show_sources():
    """Re-open the source-selection window for the last-played item.

    Reads the single row cached in the 'sources' DB table, re-resolves
    the available links through the cached c_get_sources call, shows
    them in the MyAddon dialog and, on selection, restarts playback via
    a PlayMedia plugin:// URL.

    Side effects: reads/uses the module-global ``list_index`` set by the
    dialog; stops the current player before re-launching.
    Returns '0' when the user cancelled (list_index == 888).
    """
    global list_index
    list=[]
    # Cache lifetime (from settings) for the expensive source scrape.
    time_to_save=int(Addon.getSetting("save_time"))
    dbcur.execute("SELECT * FROM sources")
    match = dbcur.fetchone()
    if match!=None:
        name,url,icon,image,plot,year,season,episode,original_title,heb_name,show_original_year,eng_name,isr,id=match
        m=[]
        m.append((name,url,icon,image,plot,year,season,episode,original_title,heb_name,show_original_year,eng_name,isr,id))
        prev_name=name
        iconimage=icon
        fanart=image
        data=year
        # Strip window-routing markers that were stored with the plot.
        description=plot.replace('-Episode ','').replace('-NEXTUP-','').encode('utf8')
        if season!=None and season!="%20":
            name1=name
        else:
            # Movie titles were URL-escaped when stored; undo that here.
            name1=name.encode('utf8').replace("%27","'").replace("%20"," ")
        match_a,all_links_fp,all_pre,f_subs= cache.get(c_get_sources, time_to_save,original_title ,year,original_title.replace("%27","'"),season,episode,id,eng_name.replace("%27","'"),show_original_year,heb_name.replace("%27","'"),isr,False,'false','no','0', table='pages')
        all_data=fix_links(match_a,iconimage,fanart,description,show_original_year,season,episode)
        all_links=[]
        # Build the display strings ('quality/server-name$$$$$$$link') and
        # a parallel list of raw links indexed the same way.
        for name,link,icon,image,plot,year,q,server,f_q,saved_name,pre in all_data:
            list.append('[COLOR gold]'+str(pre)+'%[/COLOR]-[COLOR gold]'+q+'[/COLOR][COLOR lightblue]'+server+'[/COLOR]-'+name+'$$$$$$$'+link)
            all_links.append(link)
        #time_left=xbmc.Player().getTotalTime()-xbmc.Player().getTime()
        time_to_wait=int(Addon.getSetting("show_p_time"))
        window = MyAddon(name ,list,time_to_wait,image,plot)
        window.doModal()
        del window
        fast_link=' '
        # list_index is set by the dialog: 888/999 mean cancel/stop.
        if list_index!=999 and list_index!=888:
            fast_link=all_links[list_index]
        if list_index==888:
            return '0'
        if fast_link!=' ':
            xbmc.Player().stop()
            xbmc.executebuiltin(('XBMC.PlayMedia("plugin://plugin.video.destinyds/?data=%s&dates=EMPTY&description=%s&eng_name=%s&episode=%s&fanart=%s&heb_name=%s&iconimage=%s&id=%s&isr=' '&mode2=5&name=%s&original_title=%s&season=%s&show_original_year=%s&tmdbid=EMPTY&url=%s&fast_link=%s&prev_name=%s",return)'%(data,urllib.quote_plus(description),eng_name,episode,urllib.quote_plus(fanart),heb_name,urllib.quote_plus(iconimage),id,prev_name,original_title,season,show_original_year,urllib.quote_plus(fast_link),urllib.quote_plus(fast_link),prev_name)).replace('EMPTY','%20'))
def last_sources():
    # Menu entry point: re-open the source window for the most recently
    # played item (delegates to show_sources, which reads the 'sources'
    # DB table).
    show_sources()
def acestream():
    # Top-level Acestream menu: a search entry (mode 77) and the saved
    # channel bookmarks (mode 79), sharing the same artwork.
    icon='https://lh3.googleusercontent.com/0m0JeYjdEbLUVYCn_4vQjgaybPzyZB9z1fazy07JFkKyF6dK1gboo7_N9cz0GADxJw4=s180'
    fanart='https://i.pinimg.com/originals/6b/18/31/6b1831503dc0e0470b2bf1e1b5df978f.jpg'
    for title, mode in (('Search', 77), ('My Channels', 79)):
        addDir3(title.decode('utf8'), 'www', mode, icon, fanart, 'Acestream'.decode('utf8'))
def search_ace():
    """Search three Acestream indexes for a channel name and list results.

    Sources, in order: acestreamsearch.com (POST search), the
    AceLiveList dump (prefixed '[S-2] '), livefootballol.me
    (prefixed '[S-3] ').  Each hit is added as a playable link that
    routes the acestream content id through the local engine at
    http://127.0.0.1:6878.
    """
    search_entered=''
    keyboard = xbmc.Keyboard(search_entered, 'Enter Search')
    keyboard.doModal()
    if keyboard.isConfirmed():
        search_entered = keyboard.getText()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': 'https://acestreamsearch.com/en/',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    data = {
        'cn': search_entered
    }
    response = requests.post('https://acestreamsearch.com/en/', headers=headers, data=data).content
    regex_pre='<ul class="list-group">(.+?)</ul>'
    match_pre=re.compile(regex_pre).findall(response)
    for item in match_pre:
        regex='<li class="list-group-item"><a href="(.+?)">(.+?)<'
        match=re.compile(regex).findall(item)
        # NOTE(review): icon/fanart are only bound inside this loop, so
        # the '[S-2]'/'[S-3]' sections below raise NameError when the
        # first site returns no result list -- confirm intended.
        icon='https://lh3.googleusercontent.com/0m0JeYjdEbLUVYCn_4vQjgaybPzyZB9z1fazy07JFkKyF6dK1gboo7_N9cz0GADxJw4=s180'
        fanart='https://i.pinimg.com/originals/6b/18/31/6b1831503dc0e0470b2bf1e1b5df978f.jpg'
        for link,name in match:
            # Extract the content hash from the acestream:// URI.
            regex='acestream://(.+?)(?:/|$)'
            match=re.compile(regex).findall(link)
            f_link='http://127.0.0.1:6878/ace/getstream?id='+match[0]
            addLink(name,f_link,5,False,iconimage=icon,fanart=fanart,description=name)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    params = (
        ('sort', 'fname'),
    )
    logging.warning('Getting')
    response = requests.get('http://91.92.66.82/trash/ttv-list/AceLiveList.php', headers=headers, params=params).content
    regex='type=checkbox /></TD><TD data-v="(.+?)">.+?</TD><TD data-v=".+?">.+?</TD><TD>(.+?)<'
    match=re.compile(regex).findall(response)
    # Second source: substring match on channel name, id already bare.
    for name,link in match:
        if search_entered.lower() in name.lower():
            f_link='http://127.0.0.1:6878/ace/getstream?id='+link
            addLink('[S-2] '+name,f_link,5,False,iconimage=icon,fanart=fanart,description=name)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    response = requests.get('https://www.livefootballol.me/acestream-channel-list-2018.html', headers=headers).content
    regex='<tr>(.+?)</tr>'
    match_pre=re.compile(regex,re.DOTALL).findall(response)
    # Third source: table rows of (name, acestream:// link).
    for items in match_pre:
        regex='a href=".+?>(.+?)<.+?<td>(.+?)<'
        match=re.compile(regex,re.DOTALL).findall(items)
        #logging.warning(match
        for name,link in match:
            if search_entered.lower() in name.lower():
                regex='acestream://(.+?)(?:/|$)'
                match=re.compile(regex).findall(link)
                f_link='http://127.0.0.1:6878/ace/getstream?id='+match[0]
                addLink('[S-3] '+name,f_link,5,False,iconimage=icon,fanart=fanart,description=name)
def chan_ace(name,url,description):
    """Add or remove an Acestream channel bookmark in the 'acestream' table.

    name        -- channel display name
    url         -- stream identifier (used as the key for removal)
    description -- action selector: 'add' inserts a row, 'remove' deletes

    Fix: the SQL used to be built with %-interpolation, so a quote in
    name/url could break the statement (and allowed SQL injection).
    Parameterized queries are used instead; the historical "'" -> "%27"
    encoding of the stored name/description is kept so existing rows and
    readers keep working.
    """
    if description=='add':
        dbcur.execute(
            "INSERT INTO acestream Values (?, ?, ?, ?, ?, ?, ?);",
            (name.replace("'","%27"), url, description.replace("'","%27"), '', '', '', ''))
        dbcon.commit()
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Added'.decode('utf8'))).encode('utf-8'))
    elif description=='remove':
        dbcur.execute("DELETE FROM acestream WHERE url = ?", (url,))
        dbcon.commit()
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Removed'.decode('utf8'))).encode('utf-8'))
def refresh_ace(search_entered):
    """Re-resolve a saved Acestream channel name to a fresh stream URL.

    Tries the same three indexes used by search_ace, matching the saved
    name exactly (the '[S-2] '/'[S-3] ' prefixes written at save time
    route the lookup to the right site).  Returns the local engine URL
    'http://127.0.0.1:6878/ace/getstream?id=...', or shows an error
    dialog and exits when the channel is gone from every index.
    """
    o_name=search_entered
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Referer': 'https://acestreamsearch.com/en/',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    data = {
        'cn': search_entered
    }
    f_link=''
    # Source 1: acestreamsearch.com, exact name match.
    response = requests.post('https://acestreamsearch.com/en/', headers=headers, data=data).content
    regex_pre='<ul class="list-group">(.+?)</ul>'
    match_pre=re.compile(regex_pre).findall(response)
    for item in match_pre:
        regex='<li class="list-group-item"><a href="(.+?)">(.+?)<'
        match=re.compile(regex).findall(item)
        icon='https://lh3.googleusercontent.com/0m0JeYjdEbLUVYCn_4vQjgaybPzyZB9z1fazy07JFkKyF6dK1gboo7_N9cz0GADxJw4=s180'
        fanart='https://i.pinimg.com/originals/6b/18/31/6b1831503dc0e0470b2bf1e1b5df978f.jpg'
        for link,name in match:
            regex='acestream://(.+?)(?:/|$)'
            match=re.compile(regex).findall(link)
            if search_entered==name:
                f_link='http://127.0.0.1:6878/ace/getstream?id='+match[0]
                #return f_link
    # Source 2: AceLiveList dump, saved with the '[S-2] ' prefix.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    params = (
        ('sort', 'fname'),
    )
    response = requests.get('http://91.92.66.82/trash/ttv-list/AceLiveList.php', headers=headers, params=params).content
    regex='type=checkbox /></TD><TD data-v="(.+?)">.+?</TD><TD data-v=".+?">.+?</TD><TD>(.+?)<'
    match=re.compile(regex).findall(response)
    logging.warning('Renew s2')
    for name,link in match:
        if search_entered.lower() =='[s-2] '+name.lower():
            logging.warning('Found s2')
            f_link='http://127.0.0.1:6878/ace/getstream?id='+link
    # Source 3: livefootballol.me, saved with the '[S-3] ' prefix.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:62.0) Gecko/20100101 Firefox/62.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache',
    }
    response = requests.get('https://www.livefootballol.me/acestream-channel-list-2018.html', headers=headers).content
    regex='<tr>(.+?)</tr>'
    match_pre=re.compile(regex,re.DOTALL).findall(response)
    for items in match_pre:
        regex='a href=".+?>(.+?)<.+?<td>(.+?)<'
        match=re.compile(regex,re.DOTALL).findall(items)
        #logging.warning(match
        for name,link in match:
            if search_entered.lower() =='[s-3] '+ name.lower():
                regex='acestream://(.+?)(?:/|$)'
                match=re.compile(regex).findall(link)
                f_link='http://127.0.0.1:6878/ace/getstream?id='+match[0]
    if f_link=='':
        xbmcgui.Dialog().ok("Error",'Missing Channel')
        sys.exit()
    else:
        return f_link
def my_ace():
    # List every saved Acestream bookmark.  The literal 'aceplay' URL
    # tells the play handler (mode 5) to re-resolve the stream by name
    # at play time instead of using a stored link.
    icon='https://lh3.googleusercontent.com/0m0JeYjdEbLUVYCn_4vQjgaybPzyZB9z1fazy07JFkKyF6dK1gboo7_N9cz0GADxJw4=s180'
    fanart='https://i.pinimg.com/originals/6b/18/31/6b1831503dc0e0470b2bf1e1b5df978f.jpg'
    dbcur.execute("SELECT * FROM acestream")
    for row in dbcur.fetchall():
        channel_name = row[0]
        addLink(channel_name, 'aceplay', 5, False, iconimage=icon, fanart=fanart, description=channel_name)
def all_new_source(url):
    """List one page of posts from a '*-post' themed site.

    Adds a directory entry (mode 85) per scraped post and, when the
    page exposes a rel="next" link, a Next Page entry.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    # verify=False: TLS certificate validation deliberately skipped here.
    x=requests.get(url,headers=headers,verify=False).content
    regex='<div class=".+?-post">.+?a href="(.+?)">(.+?)<.+?<img.+?src="(.+?)"'
    match=re.compile(regex,re.DOTALL).findall(x)
    for link,name,image in match:
        addDir3(name,link,85,image,image,name)
    regex='link rel="next" href="(.+?)"'
    match=re.compile(regex).findall(x)
    if len(match)>0:
        # NOTE(review): iconimage/fanart are not defined in this function;
        # this relies on module-level globals being set by the dispatcher
        # before the call -- confirm.
        addDir3('[COLOR aqua][I]Next Page[/I][/COLOR]',match[0],85,iconimage,fanart,'Next Page')
def uploadThis(f,myFTP):
    """Upload local file *f* over the already-connected FTP handle *myFTP*.

    The remote name is '<db_bk_name>1_<dd_mm_YYYY>' (same scheme as
    before, just built explicitly for readability).

    Fix: the file handle is now managed by a with-block, so it is closed
    even when storbinary raises; the old code leaked it on error.  The
    unused 'from os.path import basename' was dropped.
    """
    remote_name = '%s1_%s' % (Addon.getSetting("db_bk_name"),
                              time.strftime("%d/%m/%Y").replace('/', '_'))
    with open(f, 'rb') as fh:
        myFTP.storbinary('STOR %s' % remote_name, fh)
def do_bakcup(silent='True'):
    """Zip the addon's DB + settings and push the archive to backup storage.

    silent -- string flag ('True'/'False'); 'False' shows a progress
    dialog.  (Function name typo 'bakcup' kept: callers reference it.)

    Two destinations, selected by the 'remote_selection' setting:
    '0' copies to a local/Kodi-VFS folder, anything else uploads via
    FTP (through uploadThis).  In both cases at most 5 old backups are
    kept: when a 6th exists, the oldest (by mtime/MDTM) is deleted.
    The temporary data.zip is removed at the end either way.
    """
    from zfile import ZipFile
    import datetime,os
    from shutil import copyfile
    from os.path import basename
    if silent=='False':
        logging.warning('silent2')
        dp = xbmcgui . DialogProgress ( )
        dp.create('Please Wait','Connecting server', '','')
        dp.update(0, 'Please Wait','Zipping', '' )
    # Zip cache_play.db + settings.xml into data.zip in the profile dir.
    zp_file=os.path.join(user_dataDir, 'data.zip')
    cacheFile = os.path.join(user_dataDir, 'cache_play.db')
    setting_file=os.path.join(user_dataDir, 'settings.xml')
    if os.path.isfile(zp_file):
        os.remove(zp_file)
    zipf = ZipFile(zp_file , mode='w')
    zipf.write(cacheFile , basename(cacheFile))
    zipf.write(setting_file , basename(setting_file))
    zipf.close()
    from os.path import basename
    if Addon.getSetting("remote_selection")=='0':
        # --- Local / Kodi-VFS destination ---
        onlyfiles=[]
        db_bk_folder=xbmc.translatePath(Addon.getSetting("remote_path"))
        dirList, onlyfiles =xbmcvfs.listdir(db_bk_folder)
        ct_min=0
        count=0
        # Count existing backups that carry the configured name prefix.
        for files in onlyfiles:
            f_patch_file=os.path.join(db_bk_folder,files)
            if Addon.getSetting("db_bk_name") in basename(files):
                count+=1
        if count>5:
            # First pass: find the oldest modification time...
            for files in onlyfiles:
                f_file=(os.path.join(db_bk_folder,files))
                if Addon.getSetting("db_bk_name") not in basename(f_file):
                    continue
                st = xbmcvfs.Stat(f_file)
                ct_date = st.st_mtime()
                #ct_date=time.ctime(os.path.getctime(f_file))
                if ct_min==0:
                    ct_min=ct_date
                elif ct_date<ct_min:
                    ct_min=ct_date
            # ...second pass: delete the file that has it.
            for files in onlyfiles:
                f_file=os.path.join(db_bk_folder,files)
                if Addon.getSetting("db_bk_name") not in basename(f_file):
                    continue
                st = xbmcvfs.Stat(f_file)
                ct_date = st.st_mtime()
                #ct_date=time.ctime(os.path.getctime(f_file))
                if ct_date==ct_min:
                    xbmcvfs.delete(f_file)
                    break
        xbmcvfs.copy (zp_file,os.path.join(db_bk_folder,Addon.getSetting("db_bk_name")+'_'+str(time.strftime("%d/%m/%Y")).replace('/','_')))
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Backup OK'.decode('utf8'))).encode('utf-8'))
    else:
        # --- FTP destination ---
        if silent=='False':
            dp.update(20, 'Please Wait','Connecting Server', '' )
        import ftplib
        import os,urllib
        from datetime import datetime
        # Pre-import _strptime: first strptime use inside a thread can
        # otherwise fail in Python 2.
        import _strptime
        server=Addon.getSetting("ftp_host")
        username=Addon.getSetting("ftp_user")
        password=Addon.getSetting("ftp_pass")
        try:
            myFTP = ftplib.FTP(server, username, password)
            if silent=='False':
                dp.update(40, 'Please Wait','Connection Successful', '' )
            files = myFTP.nlst()
            found=0
            if silent=='False':
                dp.update(60, 'Please Wait','Collecting', '' )
            # Ensure the kodi_backup directory exists, then work inside it.
            for f in files:
                if 'kodi_backup' in f:
                    found=1
            if found==0:
                myFTP.mkd('kodi_backup')
            myFTP.cwd('kodi_backup')
            files = myFTP.nlst()
            count=0
            ct_min=0
            for f in files:
                if Addon.getSetting("db_bk_name") in basename(f):
                    count+=1
            if count>5:
                # Rotation via MDTM timestamps; same two-pass scheme as the
                # local branch.  NOTE(review): the values compared here mix
                # formatted strings and datetime objects depending on which
                # strptime path ran -- confirm the ordering is intended.
                for f in files:
                    if Addon.getSetting("db_bk_name") not in basename(f):
                        continue
                    try:
                        modifiedTime = myFTP.sendcmd('MDTM ' + f)
                        #ct_date=datetime.strptime(modifiedTime[4:], "%Y%m%d%H%M%S").strftime("%d %B %Y %H:%M:%S")
                        try:
                            ct_date = datetime.strptime(modifiedTime[4:], "%Y%m%d%H%M%S").strftime("%d %B %Y %H:%M:%S")
                        except TypeError:
                            ct_date = datetime.fromtimestamp(time.mktime(time.strptime(modifiedTime[4:], "%Y%m%d%H%M%S")))
                            ct_date = ct_date.strftime("%d %B %Y %H:%M:%S")
                        if ct_min==0:
                            ct_min=ct_date
                        elif ct_date<ct_min:
                            ct_min=ct_date
                    except Exception as e:
                        logging.warning(e)
                        pass
                for f in files:
                    if Addon.getSetting("db_bk_name") not in basename(f):
                        continue
                    modifiedTime = myFTP.sendcmd('MDTM ' + f)
                    #ct_date=datetime.strptime(modifiedTime[4:], "%Y%m%d%H%M%S").strftime("%d %B %Y %H:%M:%S")
                    try:
                        ct_date = datetime.strptime(modifiedTime[4:], "%Y%m%d%H%M%S").strftime("%d %B %Y %H:%M:%S")
                    except TypeError:
                        ct_date = datetime.fromtimestamp(time.mktime(time.strptime(modifiedTime[4:], "%Y%m%d%H%M%S")))
                        ct_date = ct_date.strftime("%d %B %Y %H:%M:%S")
                    if ct_date==ct_min:
                        myFTP.delete(f)
                        break
            if silent=='False':
                dp.update(80, 'Please Wait','Uploading', '' )
            uploadThis(zp_file,myFTP)
            xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Backup Done'.decode('utf8'))).encode('utf-8'))
        except Exception as e:
            xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Error In Backup'.decode('utf8'))).encode('utf-8'))
    # Best-effort cleanup of the temporary archive.
    try:
        xbmc.sleep(1000)
        if os.path.isfile(zp_file):
            os.remove(zp_file)
    except:
        pass
    if silent=='False':
        dp.close()
    logging.warning('Done Backing Up')
def restore_backup():
    """Restore the addon DB/settings from a previously made backup.

    Mirrors do_bakcup's two destinations ('remote_selection' setting):
    '0' lets the user pick a file from the local/Kodi-VFS backup folder,
    otherwise the list comes from the FTP server's kodi_backup dir.
    Zip backups are unpacked into the profile dir; plain '.db' backups
    are copied straight over cache_play.db.  User cancel exits via
    sys.exit().
    """
    from shutil import copyfile
    import os
    if 1:#try:
        cacheFile = os.path.join(user_dataDir, 'cache_play.db')
        zp_file= os.path.join(user_dataDir, 'data.zip')
        from os.path import basename
        if Addon.getSetting("remote_selection")=='0':
            # --- Local / Kodi-VFS source ---
            onlyfiles=[]
            db_bk_folder=xbmc.translatePath(Addon.getSetting("remote_path"))
            dirList, onlyfiles =xbmcvfs.listdir(db_bk_folder)
            all_n=[]
            all_f=[]
            for f in onlyfiles:
                all_n.append(basename(f))
                all_f.append(os.path.join(db_bk_folder,f))
            ret = xbmcgui.Dialog().select("Choose Backup File", all_n)
            if ret!=-1:
                ok=xbmcgui.Dialog().yesno(("Restore from Backup"),all_n[ret]+' Restore? ')
                if ok:
                    db_name=Addon.getSetting('db_bk_name')
                    # Zip archives are unpacked; bare .db files copied as-is.
                    if '.db' not in all_n[ret]:
                        xbmcvfs.copy(all_f[ret],os.path.join(user_dataDir,'temp_zip'))
                        unzip(os.path.join(user_dataDir,'temp_zip'),user_dataDir)
                        xbmcvfs.delete(os.path.join(user_dataDir,'temp_zip'))
                    else:
                        xbmcvfs.copy(all_f[ret],cacheFile)
                    #xbmc.executebuiltin('Container.Update')
                    #Addon.setSetting('db_bk_name',db_name)
                    xbmcgui.Dialog().ok("Restore from Backup",'[COLOR aqua][I]Restore Done[/I][/COLOR]')
                else:
                    sys.exit()
        else:
            # --- FTP source ---
            dp = xbmcgui . DialogProgress ( )
            dp.create('Please Wait','Connecting to Server', '','')
            dp.update(0, 'Please Wait','Connecting to Server', '' )
            import ftplib
            import os,urllib
            from datetime import datetime
            server=Addon.getSetting("ftp_host")
            username=Addon.getSetting("ftp_user")
            password=Addon.getSetting("ftp_pass")
            try:
                myFTP = ftplib.FTP(server, username, password)
            except Exception as e:
                xbmcgui.Dialog().ok('Error in Connecting',str(e))
                sys.exit()
            dp.update(0, 'Please Wait','Succesful', '' )
            files = myFTP.nlst()
            found=0
            for f in files:
                if 'kodi_backup' in f:
                    found=1
            if found==0:
                xbmcgui.Dialog().ok("Restore",'[COLOR red][I]No Backup Exists[/I][/COLOR]')
                sys.exit()
            myFTP.cwd('kodi_backup')
            files = myFTP.nlst()
            count=0
            ct_min=0
            all_n=[]
            all_f=[]
            dp.update(0, 'Please Wait','Collecting', '' )
            for f in files:
                all_n.append(basename(f))
                all_f.append(f)
            ret = xbmcgui.Dialog().select("Choose File to Backup", all_n)
            if ret!=-1:
                ok=xbmcgui.Dialog().yesno(("Restore"),all_n[ret]+' Restore? ')
                if ok:
                    db_name=Addon.getSetting('db_bk_name')
                    i=cacheFile
                    dp.update(0, 'Please Wait','Downloading', '' )
                    # NOTE(review): the file is downloaded here and then a
                    # second time below (into zp_file or cacheFile) -- the
                    # double RETR looks redundant; confirm before removing.
                    myFTP.retrbinary("RETR " + all_f[ret] ,open(i, 'wb').write)
                    dp.close()
                    if '.db' not in all_n[ret]:
                        myFTP.retrbinary("RETR " + all_f[ret] ,open(zp_file, 'wb').write)
                        unzip(zp_file,user_dataDir)
                    else:
                        myFTP.retrbinary("RETR " + all_f[ret] ,open(i, 'wb').write)
                    Addon.setSetting('db_bk_name',db_name)
                    xbmcgui.Dialog().ok("Restore",'[COLOR aqua][I]Succesful[/I][/COLOR]')
                else:
                    sys.exit()
        if os.path.isfile(zp_file):
            os.remove(zp_file)
        try:
            dp.close()
        except:
            pass
def backup_vik():
    """Start a silent backup (do_bakcup) on a background thread.

    Returns '1' immediately; the backup runs concurrently.
    """
    import datetime
    # Touch strptime on the main thread first: the first strptime call
    # imports _strptime, which can fail when it happens inside a worker
    # thread (known Python 2 quirk).
    strptime = datetime.datetime.strptime
    logging.warning('backing up')
    threading.Thread(target=do_bakcup).start()
    return '1'
def check_ftp_conn():
    # Try to log in to the configured FTP backup server and report the
    # result in a dialog; any failure (bad host/credentials/network) is
    # shown to the user rather than raised.
    import ftplib
    import os,urllib
    from datetime import datetime
    try:
        connection = ftplib.FTP(
            Addon.getSetting("ftp_host"),
            Addon.getSetting("ftp_user"),
            Addon.getSetting("ftp_pass"))
        xbmcgui.Dialog().ok('Success','[COLOR gold]Success[/COLOR]')
    except Exception as err:
        xbmcgui.Dialog().ok('Error',str(err))
def nba(url,icon,fanart):
    # NBA replays root menu scraped from nbafullhd.com: a non-clickable
    # 'Teams' header followed by one folder per team category, then an
    # 'Archives' header with one folder per month.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
    }
    page = requests.get(url, headers=headers).content
    addNolink('Teams','www',940,False,iconimage=icon,fanart=fanart)
    for cat_id, cat_name in re.findall('<option class="level-0" value="(.+?)">(.+?)<', page):
        addDir3(cat_name,'https://www.nbafullhd.com/?cat='+cat_id,107,icon,fanart,cat_name)
    addNolink('[COLOR lightblue][I]Archives[/I][/COLOR]','www',940,False,iconimage=icon,fanart=fanart)
    month_block = re.findall('<option value="">Select Month</option>(.+?)</select>', page, re.DOTALL)
    for month_url, month_name in re.findall("<option value='(.+?)'>(.+?)</option>", month_block[0]):
        addDir3(month_name,month_url,107,icon,fanart,month_name)
def deep_nba(url,icon,fanart):
    # One page of game listings for a team/month: a directory entry per
    # thumbnail (mode 108), plus a Next Page entry when pagination exists.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
    }
    page = requests.get(url, headers=headers).content
    entry_pat = re.compile('<div class="entry-thumbnail thumbnail-landscape">.+?<a href="(.+?)" title="(.+?)".+?src="(.+?)"', re.DOTALL)
    for game_link, game_title, thumb in entry_pat.findall(page):
        game_title = replaceHTMLCodes(game_title)
        addDir3(game_title,game_link,108,thumb,thumb,game_title)
    next_links = re.compile('class="nextpostslink" rel="next" href="(.+?)"').findall(page)
    if next_links:
        addDir3('[COLOR aqua][I]Next Page[/I][/COLOR]',next_links[0],107,icon,fanart,'[COLOR aqua][I]Next Page[/I][/COLOR]')
def play_nba(name,url,icon,fanart):
    """Scrape playable stream links from an nbafullhd.com game page.

    Adds one list entry per embedded iframe (facebook embeds skipped),
    labelled with the stream server's host name, plus the iframes found
    on any mirror pages linked from the "Watch NBA ..." paragraph.

    Bug fix: when an iframe src was protocol-relative ('//host/...'),
    the old code wrote the scheme-prefixed URL into m22[0] instead of
    the loop variable, so the entry was still added with the bare
    '//...' link.  The prefix is now applied to the link actually used.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
    }
    html=requests.get(url,headers=headers).content
    # Paragraph that links out to mirror pages (handled below).
    m_pre=re.compile('<p>Watch NBA(.+?)<div class="',re.DOTALL).findall(html)
    # Direct iframes on the page itself.
    m22=re.compile('iframe.+?src="(.+?)"').findall(html)
    for links in m22:
        if 'facebook' not in links:
            if 'http' not in links:
                links='http:'+links  # fix: prefix the link we actually add
            server=re.compile('//(.+?)/').findall(links)
            if len(server)>0:
                server=server[0]
            addLink('[COLOR gold]'+server+'[/COLOR]', links,5,False,icon,fanart,'__NBA__'+'\n-HebDub-',original_title=name,saved_name=name)
    if len(m_pre)>0:
        m=re.compile('a href="(.+?)".+?alt="(.+?)"',re.DOTALL).findall(m_pre[0])
        for link,nn in m:
            # Fetch each mirror page and take its first iframe.
            z=requests.get(link,headers=headers).content
            m22=re.compile('iframe.+?src="(.+?)"').findall(z)
            if len(m22)>0:
                if 'http' not in m22[0]:
                    m22[0]='http:'+m22[0]
                server=re.compile('//(.+?)/').findall(m22[0])
                if len(server)>0:
                    server=server[0]
                addLink('[COLOR gold]'+server+'[/COLOR] - '+nn, m22[0],5,False,icon,fanart,'__NBA__'+'\n-HebDub-',original_title=name,saved_name=name)
def last_ep_aired(id):
    """Open playback sources for a TMDB show's last aired episode.

    id -- TMDB tv-show id.  Queries TMDB for 'last_episode_to_air',
    derives artwork/metadata from the response, then activates the
    addon's mode2=4 window for that episode.  Returns 0.
    """
    x=requests.get('https://api.themoviedb.org/3/tv/%s?api_key=1248868d7003f60f2386595db98455ef&language=en'%id).json()
    season=str(x['last_episode_to_air']['season_number'])
    episode=str(x['last_episode_to_air']['episode_number'])
    name=x['last_episode_to_air']['name']
    fanart=domain_s+'image.tmdb.org/t/p/original/'+x['last_episode_to_air']['still_path']
    icon=domain_s+'image.tmdb.org/t/p/original/'+x['poster_path']
    description=x['last_episode_to_air']['overview']
    data=str(x['first_air_date'].split("-")[0])
    original_title=urllib.quote_plus(x['original_name'])
    eng_name=original_title
    show_original_year=str(x['first_air_date'].split("-")[0])
    heb_name=x['name'].decode('utf-8')
    isr='0'
    # Favourite-server search is only honoured when all three TV-side
    # settings are enabled/non-empty.
    fav_search_f=Addon.getSetting("fav_search_f_tv")
    fav_servers_en=Addon.getSetting("fav_servers_en_tv")
    fav_servers=Addon.getSetting("fav_servers_tv")
    if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0 ):
        fav_status='true'
    else:
        fav_status='false'
    #get_sources(name,'www',icon,fanart,description,data,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,fav_status=fav_status)
    #xbmcplugin.endOfDirectory(int(sys.argv[1]))
    # NOTE(review): 'url' and 'tmdbid' are not defined in this function;
    # this call relies on module-level globals of those names existing
    # at call time -- confirm, otherwise it raises NameError.
    xbmc.executebuiltin(('ActivateWindow(10025,"plugin://plugin.video.destinyds/?name=%s&url=%s&iconimage=%s&fanart=%s&description=%s&data=%s&original_title=%s&id=%s&season=%s&tmdbid=%s&show_original_year=%s&heb_name=%s&isr=%s&mode2=4&episode=%s&eng_name=%s&fav_status=%s",return)'%(name,url,icon,fanart,description,data,original_title,id,season,tmdbid,show_original_year,heb_name,isr,episode,eng_name,fav_status)))
    return 0
def get_server_types(type):
    """Read the bundled server report and group server names by stream type.

    type -- 'tv' selects report_tv.txt, anything else report_movie.txt.
    Returns three lists of server names: (direct, google, rapidvideo).
    A server may appear in more than one list.
    """
    report_name = 'report_tv.txt' if type == 'tv' else 'report_movie.txt'
    report_path = os.path.join(addonPath, 'resources', report_name)
    handle = open(report_path, 'r')
    contents = handle.read()
    handle.close()
    all_direct = []
    all_google = []
    all_rapid = []
    # Each report section sits between "Start" and "END"; inside, entries look
    # like "[server name] ... {capability flags}".
    for section in re.compile('Start(.+?)END', re.DOTALL).findall(contents):
        entries = re.compile('\[(.+?)\].+?\{(.+?)\}', re.DOTALL).findall(section)
        for server_name, flags in entries:
            flags_lc = flags.lower()
            if 'direct' in flags_lc:
                all_direct.append(server_name)
            if 'google' in flags_lc:
                all_google.append(server_name)
            if 'rapidvideo' in flags_lc:
                all_rapid.append(server_name)
    return all_direct, all_google, all_rapid
def get_im_data_rt(imdbid,plot_o,html_g,xxx):
    """Worker: resolve one IMDB id to TMDB metadata, append to all_data_imdb.

    Spawned as a thread from get_data_imdb.
    imdbid -- IMDB id ('tt...') to look up via TMDB's /find endpoint.
    plot_o -- contains 'movie' for movie lookups; anything else means TV.
    html_g -- pre-fetched TMDB genre-list JSON (used to map genre ids to names).
    xxx    -- original position in the input list, stored so the caller can
              re-sort results into request order.
    """
    import random
    global all_data_imdb
    url='https://api.themoviedb.org/3/find/%s?api_key=b7cd3340a794e5a2f35e3abb820b497f&language=en&external_source=imdb_id'%imdbid
    #y=requests.get(url,headers=headers).json()
    y=json.loads(urllib2.urlopen(url).read())
    if 'movie' in plot_o:
        r=y['movie_results']
    else:
        r=y['tv_results']
    # No TMDB match for this IMDB id -> silently contribute nothing.
    if len(r)>0:
        if 'movie' in plot_o:
            new_name=r[0]['title']
        else:
            new_name=r[0]['name']
        icon=domain_s+'image.tmdb.org/t/p/original/'+r[0]['poster_path']
        if r[0]['backdrop_path']!=None:
            image=domain_s+'image.tmdb.org/t/p/original/'+r[0]['backdrop_path']
        else:
            image=' '
        plot=r[0]['overview']
        if 'movie' in plot_o:
            original_title=r[0]['original_title']
        else:
            original_title=r[0]['original_name']
        rating=r[0]['vote_average']
        # Year comes from release_date (movies) / first_air_date (TV);
        # ' ' is the placeholder when the field is absent or null.
        if 'movie' in plot_o:
            if 'release_date' in r[0]:
                if r[0]['release_date']==None:
                    year=' '
                else:
                    year=str(r[0]['release_date'].split("-")[0])
            else:
                year=' '
        else:
            if 'first_air_date' in r[0]:
                if r[0]['first_air_date']==None:
                    year=' '
                else:
                    year=str(r[0]['first_air_date'].split("-")[0])
            else:
                year=' '
        # Map the result's genre ids to names using the shared genre table.
        genres_list= dict([(i['id'], i['name']) for i in html_g['genres'] \
            if i['name'] is not None])
        try:genere = u' / '.join([genres_list[x] for x in r[0]['genre_ids']])
        except:genere=''
        id=str(r[0]['id'])
        if 'movie' in plot_o:
            fav_search_f=Addon.getSetting("fav_search_f")
            fav_servers_en=Addon.getSetting("fav_servers_en")
            fav_servers=Addon.getSetting("fav_servers")
        else:
            fav_search_f=Addon.getSetting("fav_search_f_tv")
            fav_servers_en=Addon.getSetting("fav_servers_en_tv")
            fav_servers=Addon.getSetting("fav_servers_tv")
        if fav_search_f=='true' and fav_servers_en=='true' and (len(fav_servers)>0 ):
            fav_status='true'
        else:
            fav_status='false'
        # NOTE(review): `heb_name` here is the module-level global, not a
        # value looked up for this title - confirm that is intended.
        all_data_imdb.append(( new_name , url,icon,image,plot,rating,year,genere,original_title,id,heb_name,fav_status,xxx,imdbid))
def get_data_imdb(m,plot_o):
    """Resolve a list of IMDB ids to TMDB metadata using one thread per id.

    m      -- iterable of IMDB ids.
    plot_o -- 'movie'/'tv' selector passed through to get_im_data_rt.
    Shows an optional progress dialog (setting "dp") and returns the shared
    all_data_imdb list the workers append to.
    """
    import urllib2
    global all_data_imdb
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    # The genre id->name table is fetched once and shared by all workers.
    if 'movie' in plot_o:
        url_g=domain_s+'api.themoviedb.org/3/genre/movie/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    else:
        url_g=domain_s+'api.themoviedb.org/3/genre/tv/list?api_key=1248868d7003f60f2386595db98455ef&language=en'
    if Addon.getSetting("dp")=='true':
        dp = xbmcgui . DialogProgress ( )
        dp.create('Please Wait','Updating', '','')
        dp.update(0, 'Please Wait','Updating', '' )
    z=0
    html_g=requests.get(url_g).json()
    thread=[]
    xxx=0
    # One worker thread per id; xxx records the original position so the
    # caller can restore request order afterwards.
    for imdbid in m:
        thread.append(Thread(get_im_data_rt,imdbid,plot_o,html_g,xxx))
        thread[len(thread)-1].setName(imdbid)
        xxx+=1
    z=0
    # Stagger thread start-up; larger batches get a longer delay between
    # starts, presumably to avoid hammering the TMDB API.
    for td in thread:
        td.start()
        if len(thread)>38:
            xbmc.sleep(255)
        else:
            xbmc.sleep(10)
        if Addon.getSetting("dp")=='true':
            dp.update(int(((z* 100.0)/(len(thread))) ), 'Please Wait','Updating', td.name )
        z=z+1
    num_live_pre=0
    # Poll until every worker has finished (or the dialog is cancelled).
    while 1:
        num_live=0
        still_alive=0
        for yy in range(0,len(thread)):
            if thread[yy].is_alive():
                num_live=num_live+1
                still_alive=1
                if Addon.getSetting("dp")=='true':
                    dp.update(len(thread)-num_live_pre, 'Please Wait','Updating', thread[yy].name )
        num_live_pre=num_live
        if Addon.getSetting("dp")=='true':
            if dp.iscanceled():
                dp.close()
                break
        if still_alive==0:
            break
        xbmc.sleep(100)
    if Addon.getSetting("dp")=='true':
        dp.close()
    return all_data_imdb
def must_see(plot,url):
    """List an IMDB 'must see' page as movie (mode 4) or TV (mode 7) entries.

    plot -- contains 'movie' for movie pages; also reused as the next-page
            description.
    url  -- IMDB page to scrape for title ids.
    Returns the string 'ok'.
    """
    o_plot=plot
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x1=requests.get(url,headers=headers).content
    if 'movie' in plot:
        mode=4
        regex='data-titleid="(.+?)"'
    else:
        mode=7
        regex='<div class="ribbonize" data-tconst="(.+?)"'
    m=re.compile(regex).findall(x1)
    # Fallback: some movie pages use the TV-style markup for title ids.
    if len(m)==0:
        regex='<div class="ribbonize" data-tconst="(.+?)"'
        m=re.compile(regex).findall(x1)
    # Resolve ids to metadata (cached for 24 hours), then restore request
    # order by sorting on the stored position (index 12 of each tuple).
    all_data=cache.get(get_data_imdb,24,m,plot, table='pages')
    #all_data=get_data_imdb(m,plot)
    all_data=sorted(all_data, key=lambda x: x[12], reverse=False)
    for new_name , url,icon,image,plot,rating,year,genere,original_title,id,heb_name,fav_status,xxx,imdbid in all_data:
        addDir3( new_name , url,mode, icon,image,plot,rating=rating,data=year,show_original_year=year,generes=genere,original_title=original_title,id=id,eng_name=original_title,heb_name=new_name,fav_status=fav_status)
    # Pagination: prefer the explicit start=N parameter, else the generic
    # "desc" next link.
    regex='title_type=tv_series&start=(.+?)&'
    m=re.compile(regex,re.DOTALL).findall(x1)
    if len(m)==0:
        regex='<div class="desc">.+?a href="(.+?)"'
        m2=re.compile(regex,re.DOTALL).findall(x1)
        addDir3( '[COLOR gold][I]Next Page[/I][/COLOR]', 'https://www.imdb.com'+m2[0],114, 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTTNmz-ZpsUi0yrgtmpDEj4_UpJ1XKGEt3f_xYXC-kgFMM-zZujsg','https://cdn4.iconfinder.com/data/icons/arrows-1-6/48/1-512.png',o_plot)
    elif len(m)>0:
        addDir3( '[COLOR gold][I]Next Page[/I][/COLOR]', 'https://www.imdb.com/search/title?title_type=tv_series&start=%s&ref_=adv_nxt'%m[0],114, 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTTNmz-ZpsUi0yrgtmpDEj4_UpJ1XKGEt3f_xYXC-kgFMM-zZujsg','https://cdn4.iconfinder.com/data/icons/arrows-1-6/48/1-512.png',o_plot)
    return 'ok'
def get_torrent_file(silent_mode=False):
    """Download and install the torrent2http player binary for this platform.

    Picks the right prebuilt zip for windows/android/other, downloads it into
    the addon profile's bin directory and extracts it.  If the binary already
    exists, optionally (silent_mode=False) asks whether to re-download.
    """
    import shutil
    dp = xbmcgui . DialogProgress ( )
    dp.create('Please Wait','Checking for Player', '','')
    dp.update(0, 'Please Wait','Checking for Player', '' )
    def download_file(url,path):
        # Stream `url` into <path>/1.zip, updating the progress dialog.
        local_filename =os.path.join(path, "1.zip")
        # NOTE the stream=True parameter
        r = requests.get(url, stream=True)
        total_length = r.headers.get('content-length')
        if total_length is None: # no content length header
            total_length=1
        with open(local_filename, 'wb') as f:
            dl = 0
            total_length = int(total_length)
            for chunk in r.iter_content(chunk_size=1024):
                dl += len(chunk)
                done = int(100 * dl / total_length)
                dp.update(done, 'Please Wait','Downloading Player', '' )
                if chunk: # filter out keep-alive new chunks
                    f.write(chunk)
                    #f.flush() commented by recommendation from J.F.Sebastian
        return local_filename
    def unzip(file,path):
        # Extract the downloaded archive into the bin directory.
        dp.update(100, 'Please Wait','Extracting', '' )
        from zfile import ZipFile
        zip_file = file
        ptp = 'Masterpenpass'
        #xbmc.executebuiltin("XBMC.Extract({0}, {1})".format(zip_file, path), True)
        zf=ZipFile(zip_file)
        #zf.setpassword(bytes(ptp))
        #with ZipFile(zip_file) as zf:
        zf.extractall(path)
    from kodipopcorntime.platform import Platform
    binary = "torrent2http"
    bin_dataDir=(os.path.join(xbmc.translatePath(Addon.getAddonInfo('profile')), 'resources', 'bin',"%s_%s" %(Platform.system, Platform.arch))).encode('utf-8')
    # Choose the prebuilt binary matching the current platform.
    if Platform.system == 'windows':
        binary = "torrent2http.exe"
        url='https://github.com/DiMartinoXBMC/script.module.torrent2http/raw/master/bin/windows_x86/torrent2http.exe.zip'
        file=os.path.join(bin_dataDir,'1.zip')
    elif Platform.system == "android":
        url='https://github.com/DiMartinoXBMC/script.module.torrent2http/raw/master/bin/android_arm/torrent2http.zip'
        file=os.path.join(bin_dataDir,'1.zip')
    else:
        # Anything else (linux etc.) gets the linux_arm build.
        url='https://github.com/DiMartinoXBMC/script.module.torrent2http/raw/master/bin/linux_arm/torrent2http.zip'
        file=os.path.join(bin_dataDir,'1.zip')
    torrent_file=os.path.join(xbmc.translatePath(Addon.getAddonInfo('profile')), 'resources', 'bin', "%s_%s" %(Platform.system, Platform.arch), binary).encode('utf-8')
    logging.warning(torrent_file)
    logging.warning(os.path.isfile(torrent_file))
    logging.warning(os.path.exists(bin_dataDir))
    if not os.path.exists(bin_dataDir) or not os.path.isfile(torrent_file):
        # Fresh install: wipe any partial directory, then download + extract.
        if os.path.exists(bin_dataDir):
            shutil.rmtree(bin_dataDir)
        os.makedirs(bin_dataDir)
        download_file(url,bin_dataDir)
        unzip(file,bin_dataDir)
        os.remove(file)
    else:
        # Binary already present; interactively offer a re-download.
        if silent_mode==False:
            ok=xbmcgui.Dialog().yesno(("Player Exists"),('Download Anyway?'))
            if ok:
                shutil.rmtree(bin_dataDir)
                os.makedirs(bin_dataDir)
                download_file(url,bin_dataDir)
                unzip(file,bin_dataDir)
                os.remove(file)
    dp.close()
    if silent_mode==False:
        xbmcgui.Dialog().ok('Download','[COLOR aqua][I] Success [/I][/COLOR]')
def remove_torrent_file():
    """Delete the installed torrent2http player binary after confirmation.

    Tells the user when no player is installed; does nothing if the user
    declines the confirmation prompt.
    """
    import shutil
    from kodipopcorntime.platform import Platform
    bin_dataDir=(os.path.join(xbmc.translatePath(Addon.getAddonInfo('profile')), 'resources', 'bin',"%s_%s" %(Platform.system, Platform.arch))).encode('utf-8')
    if not os.path.exists(bin_dataDir):
        xbmcgui.Dialog().ok('Remove','[COLOR aqua][I] Player is Missing [/I][/COLOR]')
        return
    confirmed = xbmcgui.Dialog().yesno(("Remove magnet player"),('Remove magnet player?'))
    if confirmed:
        shutil.rmtree(bin_dataDir)
        xbmcgui.Dialog().ok('Remove','[COLOR aqua][I] Removed [/I][/COLOR]')
def GetJson(url):
    """Fetch *url* and return its parsed JSON payload.

    Returns None when the response body is empty or parses to None/an empty
    container.  Some endpoints wrap their payload in a top-level "root" key;
    when present, the wrapped value is returned instead of the whole document.
    """
    html = requests.get(url, headers={"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0'}).content
    if html == "":
        return None
    resultJSON = json.loads(html)
    if resultJSON is None or len(resultJSON) < 1:
        return None
    # Fix: dict.has_key() was removed in Python 3 and has long been
    # deprecated; the `in` operator is equivalent and works everywhere.
    if "root" in resultJSON:
        return resultJSON["root"]
    else:
        return resultJSON
def GetLabelColor(text, keyColor=None, bold=False, color=None):
    """Wrap *text* in Kodi colour/bold markup.

    An explicit *color* wins; otherwise keyColor 'prColor' maps to orange and
    everything else to gold.  color='none' suppresses the COLOR tags; bold
    wraps the text in [B]...[/B] first.
    """
    chosen = color or ('orange' if keyColor == 'prColor' else 'gold')
    if bold:
        text = '[B]%s[/B]' % text
    if chosen == 'none':
        return text
    return '[COLOR %s]%s[/COLOR]' % (chosen, text)
def getDisplayName(title, subtitle, programNameFormat, bold=False):
    """Compose a coloured 'title - subtitle' label.

    programNameFormat 0 puts the (orange) title first, 1 puts the (gold)
    subtitle first.  Any other value raises UnboundLocalError, as before.
    """
    colored_title = GetLabelColor(title, keyColor="prColor", bold=bold)
    colored_subtitle = GetLabelColor(subtitle, keyColor="chColor")
    if programNameFormat == 0:
        displayName = ' {0} - {1} '.format(colored_title, colored_subtitle)
    elif programNameFormat == 1:
        displayName = ' {0} - {1} '.format(colored_subtitle, colored_title)
    return displayName
def get_actor_oscar(url):
    """List an IMDB Oscar-winners page, tagging each movie with its win.

    Scrapes title/poster/imdb-id triples from the page, resolves them to TMDB
    metadata (cached 24h), then lists movies sorted by rating with an
    'Oscar Winner - <title>' banner in the plot.  Returns 'ok'.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x1=requests.get(url,headers=headers).content
    regex='div class="lister-item-image">.+?<img alt="(.+?)".+?src="(.+?)".+?/title/(.+?)/'
    m=re.compile(regex,re.DOTALL).findall(x1)
    all_imdb={}
    m1=[]
    # Merge entries that share an imdb id (same movie, multiple wins):
    # names/images are joined with the '$$$$$$' separator and the 'done' flag
    # later alternates which win is displayed.
    for title,img,imdb in m:
        if imdb in m1:
            # NOTE(review): this branch appends `imdb` to m1 a second time, so
            # m1 carries duplicates into get_data_imdb - confirm intended.
            m1.append(imdb)
            all_imdb[imdb]['name']=all_imdb[imdb]['name']+'$$$$$$'+title
            all_imdb[imdb]['img']=all_imdb[imdb]['img']+'$$$$$$'+img
            all_imdb[imdb]['done']=1
        else:
            m1.append(imdb)
            all_imdb[imdb]={}
            all_imdb[imdb]['name']=title
            all_imdb[imdb]['img']=img
            all_imdb[imdb]['done']=0
    all_data=cache.get(get_data_imdb,24,m1,'Movies', table='pages')
    #all_data=get_data_imdb(m,plot)
    # Sort by rating (tuple index 5), highest first.
    all_data=sorted(all_data, key=lambda x: x[5], reverse=True)
    for new_name , url,icon,image,plot,rating,year,genere,original_title,id,heb_name,fav_status,xxx,imdbid in all_data:
        add_p=''
        add_img=icon
        if imdbid in all_imdb:
            if '$$$$$$' in all_imdb[imdbid]['name']:
                # Multi-win movie: alternate between the merged win entries on
                # successive occurrences via the 'done' toggle.
                if all_imdb[imdbid]['done']==1:
                    index=1
                    add_p='[COLOR gold]Oscar Winner - '+all_imdb[imdbid]['name'].split('$$$$$$')[index]+'[/COLOR]\n'
                    add_img=all_imdb[imdbid]['img'].split('$$$$$$')[index]
                    all_imdb[imdbid]['done']=0
                else:
                    index=0
                    add_p='[COLOR gold]Oscar Winner - '+all_imdb[imdbid]['name'].split('$$$$$$')[index]+'[/COLOR]\n'
                    add_img=all_imdb[imdbid]['img'].split('$$$$$$')[index]
                    all_imdb[imdbid]['done']=1
            else:
                add_p='[COLOR gold]Oscar Winner - '+all_imdb[imdbid]['name']+'[/COLOR]\n'
                add_img=all_imdb[imdbid]['img']
        addDir3( new_name , url,4, add_img,image,add_p+plot,rating=rating,data=year,show_original_year=year,generes=genere,original_title=original_title,id=id,eng_name=original_title,heb_name=new_name,fav_status=fav_status)
    regex='next-page" href="(.+?)"'
    m=re.compile(regex).findall(x1)
    if len(m)>0:
        addDir3( '[COLOR gold][I]Next Page[/I][/COLOR]', 'https://www.imdb.com'+m[0],134, 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTTNmz-ZpsUi0yrgtmpDEj4_UpJ1XKGEt3f_xYXC-kgFMM-zZujsg','https://cdn4.iconfinder.com/data/icons/arrows-1-6/48/1-512.png','Movies')
    return 'ok'
def RemoveFolder(Folder):
    """Recreate *Folder* empty: wipe it if present, then make it fresh.

    Any failure (permissions, files in use) is silently swallowed, matching
    the addon's best-effort cleanup style.
    """
    import shutil
    try:
        resolved = xbmc.translatePath(Folder)
        if os.path.isdir(resolved):
            shutil.rmtree(resolved)
        os.makedirs(resolved)
    except:
        pass
def clear_rd():
    """Wipe every stored Real-Debrid credential and notify the user."""
    for setting_key in ('rd.client_id', 'rd.auth', 'rd.refresh', 'rd.secret'):
        Addon.setSetting(setting_key, '')
    xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', ('Cleared').decode('utf8'))).encode('utf-8'))
def re_enable_rd():
    """Reset Real-Debrid credentials and run the authorisation flow again."""
    clear_rd()
    import real_debrid
    # First client runs the OAuth device-auth flow...
    authorizer = real_debrid.RealDebrid()
    authorizer.auth()
    # ...then a fresh client fetches the supported hoster list.
    refreshed = real_debrid.RealDebrid()
    rd_domains = (refreshed.getRelevantHosters())
def run_page():
    """Open a download page in the platform's web browser.

    Windows opens the Kodi win32 mirror; OSX/Linux/Android open the Android
    tutorial page.  On Android, three browser intents are fired (stock,
    Firefox, Chrome) - whichever is installed handles it.
    """
    url='https://www.toptutorials.co.uk/android/'
    url_win='http://mirrors.kodi.tv/releases/windows/win32/'
    osWin = xbmc.getCondVisibility('system.platform.windows')
    osOsx = xbmc.getCondVisibility('system.platform.osx')
    osLinux = xbmc.getCondVisibility('system.platform.linux')
    osAndroid = xbmc.getCondVisibility('System.Platform.Android')
    if osOsx:
        # ___ Open the url with the default web browser
        xbmc.executebuiltin("System.Exec(open "+url+")")
    elif osWin:
        logging.warning('Run')
        # ___ Open the url with the default web browser
        xbmc.executebuiltin("System.Exec(cmd.exe /c start "+url_win+")")
    elif osLinux and not osAndroid:
        # ___ Need the xdk-utils package
        xbmc.executebuiltin("System.Exec(xdg-open "+url+")")
    elif osAndroid:
        # ___ Open media with standard android web browser
        xbmc.executebuiltin("StartAndroidActivity(com.android.browser,android.intent.action.VIEW,,"+url+")")
        # ___ Open media with Mozilla Firefox
        xbmc.executebuiltin("StartAndroidActivity(org.mozilla.firefox,android.intent.action.VIEW,,"+url+")")
        # ___ Open media with Chrome
        xbmc.executebuiltin("StartAndroidActivity(com.android.chrome,,,"+url+")")
def live_tv():
    """List live sports channels from the WOW Sports Live app backend."""
    #taken from WOW sports live apk
    # Headers mimic the Android app ("The Stream") that the API expects.
    headers={
        'Cache-Control': 'max-age=0',
        'Data-Agent': 'The Stream',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'utf-8',
        'User-Agent': 'okhttp/3.8.1'}
    url='http://wowsportslive.com/the_stream/the_stream/api/get_posts/?api_key=cda11uT8cBLzm6a1YvsiUWOEgrFowk95K2DM3tHAPRCX4ypGjN&page=1&count=600'
    feed=requests.get(url,headers=headers).json()
    for channel in feed['posts']:
        summary=channel['channel_description']
        logo='http://wowsportslive.com/the_stream/the_stream/upload/'+channel['channel_image']
        addLink(channel['channel_name'],channel['channel_url'],5,False,iconimage=logo,fanart=logo,description=summary)
def trakt_liked(url,iconImage,fanart,page):
    """List one page of liked Trakt lists, plus a Next Page entry if any remain.

    Each directory item's url packs 'username$$$$$$$$$$$slug' for mode 31.
    """
    base_url = url
    responce, pages = call_trakt(url, pagination=True, page=page)
    for entry in responce:
        packed = entry['list']['user']['username'] + '$$$$$$$$$$$' + entry['list']['ids']['slug']
        addDir3(entry['list']['name'], packed, 31, iconImage, fanart, entry['list']['description'])
    if int(page) < int(pages):
        addDir3('[COLOR aqua][I]Next Page[/COLOR][/I]', base_url, 142, iconImage, fanart, '[COLOR aqua][I]Next Page[/COLOR][/I]', data=str(int(page)+1))
def Mail_log(url):
    """Send a log e-mail via the mail_file helper.

    url -- 'someone' sends with an empty recipient argument, anything else
    with 'ME'.  If no e-mail address is configured, the settings dialog is
    opened first.
    """
    import mail_file
    if Addon.getSetting('email')=='':
        Show_Dialog('','You Need To Enter Your Email Details','')
        Addon.openSettings()
        # NOTE(review): this still sends immediately after opening settings,
        # and passes the raw `url` rather than ''/'ME' as the other branch
        # does - confirm this asymmetry is intended.
        mail_file.EmailLog(url)
    else:
        if url=='someone':
            mail_file.EmailLog('')
        else:
            mail_file.EmailLog('ME')
def scraper_settings():
    """Open the settings dialog of the universalscrapers script module."""
    scrapers_addon = xbmcaddon.Addon('script.module.universalscrapers')
    scrapers_addon.openSettings()
def resolver_settings():
    """Open the URL-resolver settings: resolveurl, else legacy urlresolver."""
    try:
        import resolveurl
        resolver_addon = xbmcaddon.Addon('script.module.resolveurl')
        resolver_addon.openSettings()
    except:
        # resolveurl missing (or its settings failed) - fall back.
        import urlresolver
        resolver_addon = xbmcaddon.Addon('script.module.urlresolver')
        resolver_addon.openSettings()
def livetv():
    """Build the live-TV root menu (currently just the two Fluxus entries).

    The Red TV entry and the json-generator category listing are disabled
    (commented out); the trailing headers/url assignments are leftovers for
    that disabled code path.
    """
    addDir3('Fluxustv','tv',148,'https://2.bp.blogspot.com/-3gYWiZSfuN8/XJR2jmBs7tI/AAAAAAAAESA/1kThPLRZg1QUJKZx79JXJNfBJ2Z20YjYACLcBGAs/s1600/ill-ftv-iptv.png','https://koditips.com/wp-content/uploads/fluxus-tv-kodi.png','Fluxustv\n [COLOR gold] List will automatically refresh [/COLOR]')
    addDir3('Fluxustv-radio','radio',148,'https://2.bp.blogspot.com/-mmqSc5frwNo/XJR3enXl1eI/AAAAAAAAESU/crnDvQ2v8KAnMPW-JGtqJQvJgpk4W1l1wCLcBGAs/s1600/ill-ftv-radio.png','https://koditips.com/wp-content/uploads/fluxus-tv-kodi.png','Fluxustv -Radio\n [COLOR gold] List will automatically refresh [/COLOR]')
    #addDir3('Red TV','tv',151,'https://yt3.ggpht.com/a/AGF-l79G3vzEAHCBCrOR23oUNUxlSQMVJXVVkf_-Kw=s900-mo-c-c0xffffffff-rj-k-no','https://red-v.tv/assets/img/redv_share.jpg','[COLOR red]Red TV[/COLOR]')
    headers = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0.1; Le X526 Build/IIXOSOP5801910121S)',
        'Connection': 'Keep-Alive'
    }
    url='https://next.json-generator.com/api/json/get/4yEL0VioI'
    #x=requests.get(url,headers=headers).json()
    #logging.warning(x)
    #index=0
    #for items in x:
    #	addDir3(items['name'],str(index),147,items['imag'],items['imag'],items['name'])
    #	index+=1
def livetv_chan(url):
    """List the channels of live-TV category *url* (index into a fixed set).

    Only categories 0-4 exist; anything higher shows a 'Not yet...' toast
    and returns 0.
    """
    category = int(url)
    if category > 4:
        xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Not yet...'.decode('utf8'))).encode('utf-8'))
        return 0
    urls=["https://next.json-generator.com/api/json/get/41g5SJojL","https://next.json-generator.com/api/json/get/4JCte4iiU","https://next.json-generator.com/api/json/get/EklkUrnj8","https://next.json-generator.com/api/json/get/4Jkh1qTi8","https://next.json-generator.com/api/json/get/N1hEg5poU"]
    headers = {
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0.1; Le X526 Build/IIXOSOP5801910121S)',
        'Connection': 'Keep-Alive'
    }
    channels = requests.get(urls[category], headers=headers).json()
    for channel in channels:
        addLink(channel['title'], channel['urlvideo'], 5, False, channel['image'], channel['image'], channel['title'])
def flexustv(url):
    """Load the Fluxus TV ('tv') or radio ('radio') M3U playlist.

    The Fluxus blogspot page embeds its playlist URL in an
    <input id="myInput1"> field; that URL is handed to m3u8_cont.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:66.0) Gecko/20100101 Firefox/66.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    # NOTE(review): any value other than 'tv'/'radio' leaves `u` unassigned
    # and raises below - confirm callers only pass these two values.
    if url=='tv':
        u='https://fluxustv.blogspot.com/p/iptv.html'
    elif url=='radio':
        u='https://fluxustv.blogspot.com/p/radio.html'
    x=requests.get(u,headers=headers).content
    regex='input id="myInput1" size=".+?" type="text" value="(.+?)"'
    m=re.compile(regex).findall(x)[0]
    # NOTE(review): `name` is the module-level global from the plugin params,
    # not a local - confirm intended.
    m3u8_cont(name,m)
def one_click(url):
    """List YTS's most-downloaded movies whose torrents are cached on
    Real-Debrid, so they can be streamed with a single click.

    url -- page number as a string; non-numeric values fall back to page 1.
    Adds one playable item per instantly-available movie and a Next Page
    directory entry (mode 149).
    """
    try:
        page=int(url)
    except:
        page=1
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:66.0) Gecko/20100101 Firefox/66.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x=requests.get('https://yst.am/api/v2/list_movies.json?page=%s&limit=50&sort_by=download_count'%str(page),headers=headers).json()
    all_movies={}
    all_mag=[]
    # Collect every torrent hash first so Real-Debrid availability can be
    # checked in one batched call.
    for items in x['data']['movies']:
        if 'torrents' in items:
            for tor in items['torrents']:
                all_mag.append(tor['hash'])
    all_hased=[]
    import real_debrid
    rd = real_debrid.RealDebrid()
    hashCheck = rd.checkHash(all_mag)
    for hash in hashCheck:
        if 'rd' in hashCheck[hash]:
            if len(hashCheck[hash]['rd'])>0:
                all_hased.append(hash)
    for items in x['data']['movies']:
        all_movies['title']=items['title']
        all_movies['year']=items['year']
        all_movies['rating']=items['rating']
        # Bug fix: reset the genre every iteration.  Previously `genere` was
        # only assigned when a 'genres' key existed, so a movie without one
        # raised NameError (first item) or silently reused the previous
        # movie's genres.
        if 'genres' in items:
            genere = u' / '.join(items['genres'])
        else:
            genere = u''
        all_movies['genre']=genere
        all_movies['icon']=items['large_cover_image']
        all_movies['fanart']=items['background_image_original']
        all_movies['links']=[]
        if 'torrents' in items:
            all_q=[]
            for tor in items['torrents']:
                # Only qualities whose hash is already cached on Real-Debrid.
                if str(tor['hash']).lower() in all_hased:
                    all_movies['links'].append(('DIRECT link',tor['quality'],tor['size'],tor['seeds'],tor['peers'],tor['hash']))
                    all_q.append(tor['quality'])
        if len(all_movies['links'])>0:
            all_movies['plot']='[COLOR gold]'+' / '.join(all_q)+'[/COLOR]\n'+items['summary']
            addLink(all_movies['title'],json.dumps(all_movies['links']),5,False,id=items['imdb_code'],iconimage=all_movies['icon'],fanart=all_movies['fanart'],description=all_movies['plot'],video_info=json.dumps(all_movies),generes=genere)
    addDir3('[COLOR aqua][I]Next Page[/COLOR][/I]',str(int(page)+1),149,'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTTNmz-ZpsUi0yrgtmpDEj4_UpJ1XKGEt3f_xYXC-kgFMM-zZujsg','https://cdn4.iconfinder.com/data/icons/arrows-1-6/48/1-512.png','[COLOR aqua][I]Next Page[/COLOR][/I]')
def one_click_free(url,iconimage,fanart):
    """Scrape a moviesmax.net page and list its movies as playable items.

    Parses two layouts: the 'notifications member' slider on the main page
    (title/year/plot plus a genre slider) and the regular 'short_entry grid'
    cards.  Adds a Next Page entry (mode 150) when pagination is present.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:66.0) Gecko/20100101 Firefox/66.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'he,he-IL;q=0.8,en-US;q=0.5,en;q=0.3',
        'Content-Type': 'application/x-www-form-urlencoded',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    x=requests.get(url,headers=headers).content
    o_x=x
    # --- Layout 1: main-page slider items -------------------------------
    regex='<div class="notifications member">(.+?)<div class="pageTitle">Main Page<'
    m_p=re.compile(regex,re.DOTALL).findall(x)[0]
    regex_pre='<div class="item">(.+?)<div class="clr"></div>'
    m_p2=re.compile(regex_pre,re.DOTALL).findall(m_p)
    for items_i in m_p2:
        regex_pre='img src="(.+?)".+?<div class="film-title"><a href="(.+?)">(.+?)<span>(.+?)<.+?Description</b><br>(.+?)<'
        m_p3=re.compile(regex_pre,re.DOTALL).findall(items_i)
        # Up to four genres are pulled from the item's genre slider.
        regex_g='<div class="sliderh">(.+?)<span class="separator">'
        m_g_pre=re.compile(regex_g).findall(items_i)
        all_g=[]
        if len(m_g_pre)>0:
            regex='a href=".+?">(.+?)<'
            m=re.compile(regex).findall(m_g_pre[0])
            for items in m:
                if len(all_g)<4:
                    all_g.append(items)
        for img,link,name,year,plot in m_p3:
            video_data={}
            video_data['title']=name
            video_data['year']=year
            video_data['plot']=plot
            video_data['genre']='/'.join(all_g)
            img='https://moviesmax.net'+img
            # id='find id' tells the player to resolve the IMDB/TMDB id later.
            addLink(name,link,5,False,iconimage=img,fanart=img,generes='/'.join(all_g),description=plot,data=year,id='find id',video_info=json.dumps(video_data))
    # --- Layout 2: regular grid cards -----------------------------------
    regex_pre='<div class="short_entry grid">(.+?)<div class="bg"></div>'
    m_pre=re.compile(regex_pre,re.DOTALL).findall(x)
    for items in m_pre:
        regex='<a href="(.+?)".+?img src="(.+?)".+?div class="title">(.+?)<span>(.+?)<.+?td class="label">Genre:</td><td>(.+?)<.+?div class="story">(.+?)<'
        m=re.compile(regex,re.DOTALL).findall(items)
        link=m[0][0]
        img='https://moviesmax.net'+m[0][1]
        name=m[0][2]
        year=m[0][3]
        genre=m[0][4]
        plot=m[0][5]
        video_data={}
        video_data['title']=name
        video_data['year']=year
        video_data['plot']=plot
        video_data['genre']=genre
        addLink(name,link,5,False,iconimage=img,fanart=img,description=plot,data=year,id='find id',video_info=json.dumps(video_data))
    # Pagination: link following the "current" page marker, if any.
    regex='span class="current">.+?</span> <a href="(.+?)"'
    m=re.compile(regex).findall(o_x)
    if len(m)>0:
        addDir3('[COLOR aqua][I]Next Page[/COLOR][/I]',m[0],150,'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcTTNmz-ZpsUi0yrgtmpDEj4_UpJ1XKGEt3f_xYXC-kgFMM-zZujsg','https://cdn4.iconfinder.com/data/icons/arrows-1-6/48/1-512.png','[COLOR aqua][I]Next Page[/COLOR][/I]')
def red_tv(icon,fanart):
    """List RedBox TV categories as directory entries (mode 152)."""
    # Headers and form payload mimic the RedBox TV Android client.
    headers={
        'Referer': 'http://welcome.com/',
        'Authorization': 'Basic aGVsbG9NRjpGdWNrb2Zm',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0.1; Le X526 Build/IIXOSOP5801910121S)',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip',
    }
    payload={'check':1,
        'user_id':9999,
        'version':31}
    listing=requests.post('http://163.172.111.138:8030/rbtv/i/redbox.tv/',headers=headers,data=payload).json()
    for category in listing['categories_list']:
        addDir3(category['cat_name'],category['cat_id'],152,icon,fanart,'[COLOR aqua][I]%s[/COLOR][/I]'%category['cat_name'])
def red_tv_chan(url,icon,fanart):
    """List the RedBox TV channels belonging to category id *url*.

    Fetches the obfuscated channel list (field names and values are
    lightly base64-mangled), fetches a stream token, and adds a link per
    matching channel.
    """
    headers={
        'Referer': 'http://welcome.com/',
        'Authorization': 'Basic aGVsbG9NRjpGdWNrb2Zm',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0.1; Le X526 Build/IIXOSOP5801910121S)',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip',
    }
    data={'check':1,
    'user_id':9999,
    'version':31}
    x=requests.post('http://163.172.111.138:8030/rbtv/i/redbox.tv/',headers=headers,data=data).json()
    headers2={
        'Modified': '10516203243506373899',
        'Authorization': 'Basic eWFyYXBuYWthYW1rYXJvOnR1bmduYWtpYWthcm8=',
        'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 6.0.1; Le X526 Build/IIXOSOP5801910121S)',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'utf-8'}
    # Per-session stream token appended to every channel URL.
    token=requests.get('http://51.15.209.90:8800/fio/3b.rbt/',headers=headers2).content
    headers3={
        'User-Agent': 'stagefright/1.2 (Linux;Android 4.0.3) Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10 QuickTime',
        'Accept': '*/*',
    }
    # Keys are obfuscated base64: e.g. 'eY2hhbm5lbHNfbGlzdA==' wraps
    # 'channels_list'; values carry a junk prefix/suffix character that is
    # stripped ([1:] / [:-1]) before base64-decoding.
    for items in x['eY2hhbm5lbHNfbGlzdA==']:
        if url==items['cat_id']:
            link=items['Qc3RyZWFtX2xpc3Q='][0]['Bc3RyZWFtX3VybA=='][1:].decode('base64')
            img=items['abG9nb191cmw='][1:].decode('base64')
            title=items['ZY19uYW1l'][:-1].decode('base64')
            head=urllib.urlencode(headers3)
            #link=link+token+"|"+head
            y=requests.get(link+token,headers=headers3)
            logging.warning(link+token)
            logging.warning(headers3)
            logging.warning( y.headers)
            logging.warning( y.content)
            # NOTE(review): `a` is never defined, so this line raises
            # NameError and the addLink below is unreachable - it looks like
            # a leftover debugging abort.  `items['cat_name']` is also not an
            # un-obfuscated key above.  Confirm whether this function is
            # still meant to be live before removing.
            a+=1
            addLink(title, link,5,False,img,img,'[COLOR aqua][I]%s[/COLOR][/I]'%items['cat_name'])
params=get_params()
for items in params:
params[items]=params[items].replace(" ","%20")
url=None
name=None
mode2=None
mode=None
iconimage=None
fanart=None
description=' '
original_title=' '
fast_link=''
data=0
id=' '
saved_name=' '
prev_name=' '
isr=' '
season="%20"
episode="%20"
show_original_year=0
heb_name=' '
tmdbid=' '
eng_name=' '
dates=' '
data1='[]'
fav_status='false'
only_torrent='no'
only_heb_servers='0'
new_windows_only=False
meliq='false'
tv_movie='movie'
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
tv_movie=(params["tv_movie"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
iconimage=urllib.unquote_plus(params["iconimage"])
except:
pass
try:
mode2=int(params["mode2"])
except:
try:
mode=(params["mode"])
except:
pass
try:
mode2=int(params["mode2"])
except:
pass
try:
fanart=urllib.unquote_plus(params["fanart"])
except:
pass
try:
description=urllib.unquote_plus(params["description"].encode('utf-8'))
except:
pass
try:
data=urllib.unquote_plus(params["data"])
except:
pass
try:
original_title=(params["original_title"])
except:
pass
try:
id=(params["id"])
except:
pass
try:
season=(params["season"])
except:
pass
try:
episode=(params["episode"])
except:
pass
try:
tmdbid=(params["tmdbid"])
except:
pass
try:
eng_name=(params["eng_name"])
except:
pass
try:
show_original_year=(params["show_original_year"])
except:
pass
try:
heb_name=urllib.unquote_plus(params["heb_name"])
except:
pass
try:
isr=(params["isr"])
except:
pass
try:
saved_name=clean_name(params["saved_name"],1)
except:
pass
try:
prev_name=(params["prev_name"])
except:
pass
try:
dates=(params["dates"])
except:
pass
try:
data1=(params["data1"])
except:
pass
try:
fast_link=urllib.unquote_plus(params["fast_link"])
except:
pass
try:
fav_status=(params["fav_status"])
except:
pass
try:
only_torrent=(params["only_torrent"])
except:
pass
try:
only_heb_servers=(params["only_heb_servers"])
except:
pass
try:
new_windows_only=(params["new_windows_only"])
new_windows_only = new_windows_only == "true"
except:
pass
try:
meliq=(params["metaliq"])
except:
pass
episode=str(episode).replace('+','%20')
season=str(season).replace('+','%20')
original_title=original_title.replace('+','%20').replace('%3A','%3a')
all_data=((name,url,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr))
#ClearCache()
logging.warning('mode2')
logging.warning(mode2)
logging.warning('mode')
logging.warning(mode)
#logging.warning(params)
#from youtube_ext import get_youtube_link3
#link= get_youtube_link3('https://www.youtube.com/watch?v=b3SHqoMDSGg').replace(' ','%20')
#xbmc.Player().play(link)
if Addon.getSetting("enable_db_bk")=='true':
time_to_save_db=int(Addon.getSetting("db_backup"))*24
bk=cache.get(backup_vik,time_to_save_db, table='db_backup')
#getsubs( 'The Avengers', 'tt0848228', '%20', '%20','The Avengers')
st=''
rest_data=[]
read_data2=[]
AWSHandler.UpdateDB()
#rd_domains=cache.get(get_rd_servers, 72, table='pages')
#logging.warning(rd_domains)
#import real_debrid
#rd = real_debrid.RealDebrid()
#url='http://rapidgator.net/file/a5062d1cd8bd121923972d10ee4db27f/Black.Panther.2018.BluRay.1080p.DTS-HD.MA.7.1.x264.dxva-FraMe..'
#url='http://nitroflare.com/view/0AE76EDD482C6EE/emd-blackpanther.2160p.mkv'
#url='https://uploadgig.com/file/download/CB375525d73be9bc/Black.Panther.2018.3D.BluRay.1080p.Half-SBS.DTS-HD.MA7.1.x264-LEGi0N..'
#link=rd.get_link(url)
#logging.warning('link')
#logging.warning(link)
try:
if mode!=None:
a=int(mode)
mode2=a
mode=None
except:
pass
if mode!=None:
from new_jen2 import run_me
logging.warning(url)
run_me(url)
logging.warning('End Runme')
elif mode2==None or url==None or len(url)<1 and len(sys.argv)>1:
logging.warning('threading.active_count:'+str(threading.active_count()))
logging.warning('threading.current_thread:'+str(threading.current_thread().getName()))
for thread in threading.enumerate():
logging.warning("Thread name is %s." % thread.getName())
if Addon.getSetting("chache_clean")=='true':
ClearCache()
main_menu()
if Addon.getSetting("ghaddr")!='aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL21vc2hlcDE1L2JhY2svbWFzdGVyLzUudHh0':
Addon.setSetting("ghaddr", 'aHR0cHM6Ly9yYXcuZ2l0aHVidXNlcmNvbnRlbnQuY29tL21vc2hlcDE1L2JhY2svbWFzdGVyLzUudHh0')
elif mode2==2:
get_genere(url,iconimage)
elif mode2==3:
get_movies(url,isr)
elif mode2==4:
if done1!=2:
st,rest_data=get_sources(name,url,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,dates=dates,data1=data1,fast_link=fast_link,fav_status=fav_status,only_torrent=only_torrent,only_heb_servers=only_heb_servers,new_windows_only=new_windows_only,metaliq=meliq)
elif mode2==5:
logging.warning('isr6:'+isr)
play(name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id)
elif mode2==6:
auto_play(name,url,iconimage,fanart,description,data,season,episode,original_title,saved_name,heb_name,show_original_year,eng_name,isr,prev_name,id)
elif mode2==7:
get_seasons(name,url,iconimage,fanart,description,data,original_title,id,heb_name,isr)
elif mode2==8:
get_episode(name,url,iconimage,fanart,description,data,original_title,id,season,tmdbid,show_original_year,heb_name,isr)
elif mode2==10:
get_qu(url)
elif mode2==11:
search_entered=''
if 'search' in url :
keyboard = xbmc.Keyboard(search_entered, 'Enter Search')
keyboard.doModal()
if keyboard.isConfirmed():
search_entered = keyboard.getText()
from hebdub_movies import get_dub
get_dub(url,search_entered=search_entered)
elif mode2==12:
search_dub(name,url,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,show_original_year,heb_name)
elif mode2==13:
movies_menu()
elif mode2==14:
tv_menu()
elif mode2==15:
search_menu()
elif mode2==16:
ClearCache()
elif mode2==17:
save_to_fav(description)
elif mode2==18:
open_fav(url)
elif mode2==19:
remove_to_fav(description)
elif mode2==20:
remove_fav_num(description)
elif mode2==21:
play_by_subs(name,url,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,saved_name,original_title)
elif mode2==22:
activate_torrent(name,url,iconimage,fanart,description,data,original_title,season,episode,id,eng_name,saved_name)
elif mode2==23:
run_test(name)
elif mode2==24:
open_settings()
elif mode2==25:
play_trailer(id,tv_movie)
elif mode2==26:
movie_recomended()
elif mode2==27:
tv_recomended()
elif mode2==28:
latest_dvd(url)
elif mode2==29:
if Addon.getSetting("use_trak")=='false':
xbmcgui.Dialog().ok('Destiny of Deathstar','[COLOR lightseagreen]Enable TRAKT in Settings First[/COLOR]')
else:
main_trakt()
elif mode2==30:
reset_trakt()
elif mode2==31:
get_trk_data(url,data)
elif mode2==32:
logging.warning('isr:'+isr)
read_data2=last_viewed(url,isr=isr)
elif mode2==33:
scan_direct_links(url)
elif mode2==34:
remove_from_trace(name,original_title,id,season,episode)
elif mode2==35:
play_level_movies(url)
elif mode2==36:
update_providers()
elif mode2==37:
live()
elif mode2==38:
mysettings()
elif mode2==40:
live_tv()
elif mode2==41:
fast_play(url)
elif mode2==42:
get_jen_cat()
elif mode2==43:
#logging.warning(url)
get_jen_list(url,iconimage,fanart)
elif mode2==44:
kids_world()
elif mode2==49:
last_played_c()
elif mode2==54:
display_results(url)
elif mode2==55:
get_m3u8()
elif mode2==56:
m3u8_cont(name,url)
elif mode2==58:
eng_anim()
elif mode2==59:
next_anime(url)
elif mode2==60:
anime_ep(url,iconimage)
elif mode2==61:
play_anime(name,url,iconimage)
elif mode2==62:
search_anime()
elif mode2==63:
progress_trakt(url)
elif mode2==64:
get_trakt()
elif mode2==65:
add_remove_trakt(name,original_title,id,season,episode)
elif mode2==66:
get_group_m3u8(url,description)
elif mode2==67:
download_file(url)
elif mode2==68:
cartoon()
elif mode2==69:
cartoon_list(url)
elif mode2==70:
cartoon_episodes(url)
elif mode2==71:
play_catoon(name,url)
elif mode2==72:
by_actor(url)
elif mode2==73:
actor_m(url)
elif mode2==74:
search_actor()
elif mode2==75:
last_sources()
elif mode2==76:
acestream()
elif mode2==77:
search_ace()
elif mode2==78:
chan_ace(name,url,description)
elif mode2==79:
my_ace()
elif mode2==80:
logging.warning(data)
if data=='trakt':
from trakt_jen import trakt
trakt(url)
elif data=='tmdb':
from trakt_jen import tmdb
tmdb(name,data)
elif 'imdb' in data:
from trakt_jen import imdb
imdb(url,data)
elif mode2==81:
logging.warning(url)
get_next_jen(url,iconimage,fanart)
elif mode2==82:
from jen import pluginquerybyJSON
pluginquerybyJSON(url)
elif mode2==83:
xbmc.executebuiltin('Container.update(' + url + ')')
elif mode2==89:
restore_backup()
elif mode2==90:
check_ftp_conn()
elif mode2==91:
last_viewed_tvshows(url)
elif mode2==92:
open_ftp()
elif mode2==93:
tv_chan()
elif mode2==94:
build_chan(url)
elif mode2==95:
add_my_chan()
elif mode2==96:
remove_chan(name)
elif mode2==97:
play_custom(url)
elif mode2==98:
server_test()
elif mode2==99:
xbmc.executebuiltin(url)
elif mode2==100:
fix_setting(force=True)
elif mode2==101:
tv_neworks()
elif mode2==102:
xbmc.executebuiltin(('ActivateWindow(10025,"plugin://plugin.video.destinyds/?name=%s&url=%s&iconimage=%s&fanart=%s&description=%s&data=%s&original_title=%s&id=%s&season=%s&tmdbid=%s&show_original_year=%s&heb_name=%s&isr=%s&mode2=8",return)'%(name,url,iconimage,fanart,description,data,original_title,id,season,tmdbid,show_original_year,heb_name,isr)))
elif mode2==103:
xbmc.executebuiltin(('ActivateWindow(10025,"plugin://plugin.video.destinyds/?name=''&mode2=None",return)'))
elif mode2==105:
nba(url,iconimage,fanart)
elif mode2==107:
deep_nba(url,iconimage,fanart)
elif mode2==108:
play_nba(name,url,iconimage,fanart)
elif mode2==109:
last_ep_aired(id)
elif mode2==110:
last_ep_aired(id)
elif mode2==112:
movie_prodiction()
elif mode2==113:
last_tv_subs(url)
elif mode2==114:
must_see(description,url)
elif mode2==118:
get_torrent_file()
elif mode2==119:
remove_torrent_file()
elif mode2==120:
do_bakcup(silent=url)
elif mode2==131:
build_jen_db()
elif mode2==132:
current_folder = os.path.dirname(os.path.realpath(__file__))
file = open(os.path.join(current_folder, 'explain.txt'), 'r')
msg= file.read()
file.close()
TextBox_help('What is Destiny of Deathstar', msg)
elif mode2==133:
get_multi_year(url,int(original_title),int(data))
elif mode2==134:
get_actor_oscar(url)
elif mode2==137:
clear_rd()
elif mode2==138:
re_enable_rd()
elif mode2==140:
run_page()
elif mode2==141:
from new_jen2 import run_me
run_me(url)
elif mode2==142:
trakt_liked(url,iconimage,fanart,data)
elif mode2==143:
Mail_log(url)
elif mode2==144:
scraper_settings()
elif mode2==145:
resolver_settings()
elif mode2==146:
livetv()
elif mode2==147:
livetv_chan(url)
elif mode2==148:
flexustv(url)
elif mode2==149:
one_click(url)
elif mode2==150:
one_click_free(url,iconimage,fanart)
elif mode2==151:
red_tv(iconimage,fanart)
elif mode2==152:
red_tv_chan(url,iconimage,fanart)
elif mode2==999:
xbmc.executebuiltin((u'Notification(%s,%s)' % ('Destiny of Deathstar', 'Episode Not Aired Yet...'.decode('utf8'))).encode('utf-8'))
if len(sys.argv)>1:
if Addon.getSetting("lock_display")=='true':
if mode2==4 or mode2==21 or mode2==63:
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
else:
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
if Addon.getSetting("order_jen")=='1' and mode2==43:
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
if Addon.getSetting("order_jen")=='0' and mode2==43:
xbmcplugin.setContent(int(sys.argv[1]), 'movies')
if mode2==50:
xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
logging.warning(st)
logging.warning(xbmc.Player().isPlaying())
if st!='ENDALL' and mode!=5:
logging.warning('once_fast_play3:'+str(once_fast_play))
check=False
if Addon.getSetting("new_window_type2")!='3':
check=True
elif once_fast_play==0:
check=True
if meliq=='false' and check:
xbmcplugin.endOfDirectory(int(sys.argv[1]))
else:
a=1
'''
if ((not( Addon.getSetting("new_source_menu")=='true' and mode2==4 ) or only_torrent=='yes') and new_windows_only==False) or st==990:
xbmcplugin.endOfDirectory(int(sys.argv[1]))
if mode2==4:
xbmc.executebuiltin("Dialog.Close(busydialog)")
'''
#listitem = xbmcgui.ListItem('')
#xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem)
else:
xbmc.executebuiltin("Dialog.Close(busydialog)")
logging.warning('ENDALLHERER')
if len(rest_data)>0:
thread=[]
logging.warning('rest_of_result')
time_to_save, original_title,year,original_title2,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local=rest_data[0]
thread.append(Thread(get_rest_s, time_to_save,original_title,year,original_title,season,episode,id,eng_name,show_original_year,heb_name,isr,get_local))
thread[0].start()
if len(read_data2)>0:
url_o,match=read_data2[0]
thread=[]
thread.append(Thread(get_Series_trk_data,url_o,match))
import datetime
strptime = datetime.datetime.strptime
thread[0].start()
dbcur.close()
dbcon.close()
logging.warning('END ALL Directory')
if 0:#mode!=5:
thread=[]
thread.append(Thread(close_ok))
thread[0].start()
if done1==2:
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
'''
screen=xbmc.getInfoLabel('System.ScreenMode ')
logging.warning(screen)
if screen=='Windowed':
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
cc=0
while done1_1!=3:
xbmc.sleep(100)
cc+=1
if cc>300:
break
logging.warning('done3')
xbmc.sleep(500)
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
xbmc.sleep(500)
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
x=0
play_time=int(Addon.getSetting("play_full_time"))
while x<((play_time*10)+10):
#logging.warning('change to Fullscreen')
screen=xbmc.getInfoLabel('System.ScreenMode ')
if screen=='Windowed':
xbmc.executebuiltin( "XBMC.Action(Fullscreen)" )
#else:
# break
x+=1
xbmc.sleep(100)
'''
#sys.exit()
done1=1
logging.warning('Fullscreen')
| [
"[email protected]"
] | |
fe0cc4da59a94d0df78036600816503cfbc23403 | b2472967910be9c12576f0f97d33bca0576a8667 | /atcoder-old/2014/0510_abc008/a.py | 4994aed624cbe5ab8c5fa344b8f2893e6fc4367a | [] | no_license | ykmc/contest | 85c3d1231e553d37d1235e1b0fd2c6c23f06c1e4 | 69a73da70f7f987eb3e85da503ea6da0744544bd | refs/heads/master | 2020-09-01T22:56:10.444803 | 2020-07-14T11:36:43 | 2020-07-14T11:36:43 | 217,307,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | # Python3 (3.4.3)
import sys
input = sys.stdin.readline
# -------------------------------------------------------------
# main
# -------------------------------------------------------------
# S and T describe the inclusive integer range [S, T]; the answer is
# simply how many integers that closed range contains.
first, last = map(int, input().split())
print(last - first + 1)
"[email protected]"
] | |
e72b191ffe48b81cecb98695841bbeb849806378 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/pynput/pynput/_util.pyi | 4202bcb5464e285fb3440fe793aaaedd606e541d | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 2,715 | pyi | import sys
import threading
from _typeshed import Self
from collections.abc import Callable
from queue import Queue
from types import ModuleType, TracebackType
from typing import Any, ClassVar, Generic, TypeVar
from typing_extensions import ParamSpec, TypedDict
_T = TypeVar("_T")
_AbstractListener_T = TypeVar("_AbstractListener_T", bound=AbstractListener)
_P = ParamSpec("_P")
# Per-backend resolution instructions, keyed by backend name.
class _RESOLUTIONS(TypedDict):
    darwin: str
    uinput: str
    xorg: str
RESOLUTIONS: _RESOLUTIONS
# Resolve and return the platform backend module for *package* (a ModuleType).
def backend(package: str) -> ModuleType: ...
# Return a prefix string for *cls* relative to *base*, or None -- TODO confirm semantics in the runtime module.
def prefix(base: type | tuple[type | tuple[Any, ...], ...], cls: type) -> str | None: ...
class AbstractListener(threading.Thread):
    # Thread-based base class shared by the pynput listeners (type stub only;
    # behavior lives in the runtime module).
    class StopException(Exception): ...
    _HANDLED_EXCEPTIONS: ClassVar[tuple[type | tuple[Any, ...], ...]]  # undocumented
    _suppress: bool  # undocumented
    _running: bool  # undocumented
    _thread: threading.Thread  # undocumented
    _condition: threading.Condition  # undocumented
    _ready: bool  # undocumented
    _queue: Queue[sys._OptExcInfo | None]  # undocumented
    daemon: bool
    def __init__(self, suppress: bool = ..., **kwargs: Callable[..., bool | None] | None) -> None: ...
    # Public read-only views of the private _suppress/_running flags above.
    @property
    def suppress(self) -> bool: ...
    @property
    def running(self) -> bool: ...
    def stop(self) -> None: ...
    # Context-manager protocol, enabling `with Listener(...) as l:` usage.
    def __enter__(self: Self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def wait(self) -> None: ...
    def run(self) -> None: ...
    @classmethod
    def _emitter(cls, f: Callable[_P, _T]) -> Callable[_P, _T]: ...  # undocumented
    def _mark_ready(self) -> None: ...  # undocumented
    def _run(self) -> None: ...  # undocumented
    def _stop_platform(self) -> None: ...  # undocumented
    def join(self, *args: Any) -> None: ...
class Events(Generic[_T, _AbstractListener_T]):
    # Iterator/context-manager wrapper: yields events of type _T drawn from an
    # internal queue fed by an _AbstractListener_T instance.
    _Listener: type[_AbstractListener_T] | None  # undocumented
    class Event:
        def __eq__(self, other: object) -> bool: ...
    _event_queue: Queue[_T]  # undocumented
    _sentinel: object  # undocumented
    _listener: _AbstractListener_T  # undocumented
    start: Callable[[], None]
    def __init__(self, *args: Any, **kwargs: Any) -> None: ...
    def __enter__(self: Self) -> Self: ...
    def __exit__(
        self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
    ) -> None: ...
    def __iter__(self: Self) -> Self: ...
    def __next__(self) -> _T: ...
    # Returns the next event, or None if *timeout* elapses -- TODO confirm against runtime docs.
    def get(self, timeout: float | None = ...) -> _T | None: ...
    def _event_mapper(self, event: Callable[_P, object]) -> Callable[_P, None]: ...
class NotifierMixin: ...  # empty marker mixin in this stub (no members declared)
| [
"[email protected]"
] | |
251ac003ca5d4741bf87599906a0b4ccc9411585 | a4f5d92264f6ff32021945fd70041dc90840af49 | /docstrings/tt_postscript.py | 55f971456aebc0ad3762348da706a64aa6edf0f4 | [
"BSD-2-Clause-Views"
] | permissive | matplotlib/freetypy | 95da1c583f05726de8bd4a18ec5008cd0539909d | 601be6e816511a304302d6aafdbc24031c4df5df | refs/heads/master | 2023-08-20T05:33:00.601874 | 2017-10-23T18:35:10 | 2017-10-23T18:35:10 | 11,617,229 | 5 | 7 | null | 2017-10-23T18:35:11 | 2013-07-23T19:32:39 | Python | UTF-8 | Python | false | false | 2,488 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
TT_Postscript__init__ = """
TrueType PostScript table.
"""
TT_Postscript_format_type = """
Format of this table.
"""
TT_Postscript_italic_angle = """
Italic angle in degrees.
"""
TT_Postscript_underline_position = """
Underline position.
"""
TT_Postscript_underline_thickness = """
Underline thickness.
"""
TT_Postscript_is_fixed_pitch = """
If `True`, the font is monospaced.
"""
TT_Postscript_min_mem_type42 = """
Minimum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_max_mem_type42 = """
Maximum memory usage when the font is downloaded as a Type 42 font.
"""
TT_Postscript_min_mem_type1 = """
Minimum memory usage when the font is downloaded as a Type 1 font.
"""
TT_Postscript_max_mem_type1 = """
Maximum memory usage when the font is downloaded as a Type 1 font.
"""
| [
"[email protected]"
] | |
db77fde274d2b3dadccad3cddd3774d816d1ebe2 | f571590e3c1787d183e00b81c408362e65671f76 | /Exercisebolean.py | d76a0e8a16cc68826d3b91a1c9b8e7aaaa67d698 | [] | no_license | neymarthan/project1 | 0b3d108dd8eb4b6fa5093525d469d978faf88b88 | 5e07f9dff181bb310f3ce2c7818a8c6787d4b116 | refs/heads/master | 2022-12-26T08:44:53.464398 | 2020-10-06T09:14:29 | 2020-10-06T09:14:29 | 279,528,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | string=input('Enter the string: ')
print('This is what I found about that string:')
if string.isalnum():
print('The string is alphanumeric')
if string.alphanumeric():
print('The string contains only alphabetic characters')
if string.alphanumeric():
print('The letters in the string are all lowercase')
| [
"[email protected]"
] | |
02e6babb21c73f39d790ed41dff2417ab0a89fd8 | 056ff03373c07ec60f715333f8af17ea6ad3c615 | /labs/test_core.py | 1bdffa5a16592f3036993621d1b6ab5f58b4bdd1 | [
"MIT"
] | permissive | MITLLRacecar/racecar-daniel-chuang | f7da7f0c6ea7b86c5dff007996d6eb6d7a9de26c | 5d22aac5cbbd77d9380f3e4afaf3e0009a1791de | refs/heads/master | 2023-06-18T11:10:28.771574 | 2021-07-23T01:30:10 | 2021-07-23T01:30:10 | 383,568,872 | 0 | 0 | MIT | 2021-07-23T01:30:10 | 2021-07-06T18:47:38 | Jupyter Notebook | UTF-8 | Python | false | false | 6,335 | py | """
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
A simple program which can be used to manually test racecar_core functionality.
"""
########################################################################################
# Imports
########################################################################################
import sys
sys.path.insert(1, "../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
rc = racecar_core.create_racecar()  # car interface object used by every callback below
max_speed = 0  # drive speed cap; start() resets this to 0.25, bumpers halve/double it
update_slow_time = 0  # seconds between update_slow() calls; start() resets this to 0.5
show_triggers = False  # toggled by clicking the left joystick; update() then prints trigger values
show_joysticks = False  # toggled by clicking the right joystick; update() then prints joystick values
########################################################################################
# Functions
########################################################################################
def start():
    """Reset the tester to its default state.

    The framework invokes this once each time the start button is pressed.
    """
    global max_speed, update_slow_time, show_triggers, show_joysticks

    print("Start function called")

    # Restore the default configuration before (re)starting the session.
    max_speed = 0.25
    update_slow_time = 0.5
    show_triggers = False
    show_joysticks = False

    rc.set_update_slow_time(update_slow_time)
    rc.drive.set_max_speed(max_speed)
    rc.drive.stop()

    # Print start message (control reference, verbatim)
    print(
        ">> Test Core: A testing program for the racecar_core library.\n"
        "\n"
        "Controls:\n"
        "   Right trigger = accelerate forward\n"
        "   Left trigger = accelerate backward\n"
        "   Left joystick = turn front wheels\n"
        "   Left bumper = decrease max speed\n"
        "   Right bumper = increase max speed\n"
        "   Left joystick click = print trigger values\n"
        "   Right joystick click = print joystick values\n"
        "   A button = Display color image\n"
        "   B button = Display depth image\n"
        "   X button = Display lidar data\n"
        "   Y button = Display IMU data\n"
    )
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed
    """
    global max_speed
    global update_slow_time
    global show_triggers
    global show_joysticks
    # Check if each button was_pressed or was_released
    # NOTE(review): was_pressed/was_released look edge-triggered (fire once per
    # press/release) while is_down below is level-triggered -- confirm in racecar_core.
    for button in rc.controller.Button:
        if rc.controller.was_pressed(button):
            print(f"Button [{button.name}] was pressed")
        if rc.controller.was_released(button):
            print(f"Button [{button.name}] was released")
    # Click left and right joystick to toggle showing trigger and joystick values
    left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT)
    right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
    left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT)
    right_joystick = rc.controller.get_joystick(rc.controller.Joystick.RIGHT)
    if rc.controller.was_pressed(rc.controller.Button.LJOY):
        show_triggers = not show_triggers
    if rc.controller.was_pressed(rc.controller.Button.RJOY):
        show_joysticks = not show_joysticks
    if show_triggers:
        print(f"Left trigger: [{left_trigger}]; Right trigger: [{right_trigger}]")
    if show_joysticks:
        print(f"Left joystick: [{left_joystick}]; Right joystick: [{right_joystick}]")
    # Use triggers and left joystick to control car (like default drive)
    rc.drive.set_speed_angle(right_trigger - left_trigger, left_joystick[0])
    # Change max speed and update_slow time when the bumper is pressed
    # (speed halves while the slow-update interval doubles, clamped to [1/16, 1])
    if rc.controller.was_pressed(rc.controller.Button.LB):
        max_speed = max(1 / 16, max_speed / 2)
        rc.drive.set_max_speed(max_speed)
        update_slow_time *= 2
        rc.set_update_slow_time(update_slow_time)
        print(f"max_speed set to [{max_speed}]")
        print(f"update_slow_time set to [{update_slow_time}] seconds")
    if rc.controller.was_pressed(rc.controller.Button.RB):
        max_speed = min(1, max_speed * 2)
        rc.drive.set_max_speed(max_speed)
        update_slow_time /= 2
        rc.set_update_slow_time(update_slow_time)
        print(f"max_speed set to [{max_speed}]")
        print(f"update_slow_time set to [{update_slow_time}] seconds")
    # Capture and display color images when the A button is down
    # (A/B/X are an if/elif chain, so only one sensor view is shown per frame)
    if rc.controller.is_down(rc.controller.Button.A):
        rc.display.show_color_image(rc.camera.get_color_image())
    # Capture and display depth images when the B button is down
    elif rc.controller.is_down(rc.controller.Button.B):
        depth_image = rc.camera.get_depth_image()
        rc.display.show_depth_image(depth_image)
        depth_center_distance = rc_utils.get_depth_image_center_distance(depth_image)
        print(f"Depth center distance: [{depth_center_distance:.2f}] cm")
    # Capture and display Lidar data when the X button is down
    elif rc.controller.is_down(rc.controller.Button.X):
        lidar = rc.lidar.get_samples()
        rc.display.show_lidar(lidar)
        lidar_forward_distance = rc_utils.get_lidar_average_distance(lidar, 0)
        print(f"LIDAR forward distance: [{lidar_forward_distance:.2f}] cm")
    # Show IMU data when the Y button is pressed (independent of the chain above)
    if rc.controller.is_down(rc.controller.Button.Y):
        a = rc.physics.get_linear_acceleration()
        w = rc.physics.get_angular_velocity()
        print(
            f"Linear acceleration: ({a[0]:5.2f},{a[1]:5.2f},{a[2]:5.2f}); "
            + f"Angular velocity: ({w[0]:5.2f},{w[1]:5.2f},{w[2]:5.2f})"
        )
def update_slow():
    """Slow-rate callback: report every controller button currently held down.

    Runs at the interval configured via rc.set_update_slow_time() (default set
    in start()) after start() has been called.
    """
    for held_button in rc.controller.Button:
        if not rc.controller.is_down(held_button):
            continue
        print(f"Button [{held_button.name}] is down")
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
    rc.set_start_update(start, update, update_slow)  # register the three callbacks defined above
    rc.go()  # begin execution; presumably blocks in the framework's main loop -- see racecar_core
| [
"66690702+github-classroom[bot]@users.noreply.github.com"
] | 66690702+github-classroom[bot]@users.noreply.github.com |
fa47f633b556f75ecc66e442fe8d82e3c675b25d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-sdrs/huaweicloudsdksdrs/v1/model/reverse_protection_group_request_body.py | 8eae3df950173a01db830f9ccf7363fab8f36972 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,564 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ReverseProtectionGroupRequestBody:
    """Request body for the "reverse protection group" SDRS API call.

    Attributes:
        openapi_types (dict): maps attribute name -> attribute type name.
        attribute_map (dict): maps attribute name -> JSON key in the API schema.
        sensitive_list (list): attribute names masked as "****" by to_dict().
    """

    sensitive_list = []

    openapi_types = {
        'reverse_server_group': 'ReverseProtectionGroupRequestParams'
    }

    attribute_map = {
        'reverse_server_group': 'reverse-server-group'
    }

    def __init__(self, reverse_server_group=None):
        """ReverseProtectionGroupRequestBody

        The model defined in huaweicloud sdk

        :param reverse_server_group: payload describing the reversal request
        :type reverse_server_group: :class:`huaweicloudsdksdrs.v1.ReverseProtectionGroupRequestParams`
        """
        self._reverse_server_group = None
        self.discriminator = None
        # Assign through the property so any setter logic applies.
        self.reverse_server_group = reverse_server_group

    @property
    def reverse_server_group(self):
        """The reverse-server-group payload of this request body."""
        return self._reverse_server_group

    @reverse_server_group.setter
    def reverse_server_group(self, reverse_server_group):
        """Set the reverse-server-group payload of this request body."""
        self._reverse_server_group = reverse_server_group

    def to_dict(self):
        """Serialize the model into a plain dict, masking sensitive fields."""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Models compare equal when both type and attribute dicts match."""
        return (isinstance(other, ReverseProtectionGroupRequestBody)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"[email protected]"
] | |
f9e1ce59ea2595225385d5cf3e7a38a4f69b189e | 5f27bc1a0460a078f6fe33a544f494a5dff7f452 | /script/old/D_0617_printTEST.py | e51523cee1881ac200bba959c53948d221a0e939 | [] | no_license | A-Why-not-fork-repositories-Good-Luck/arm_move | 3e381f0310265f47da14beaac136c358fb318f92 | e2e6182cfd93df1935bd3b8e9158134964dc44fa | refs/heads/master | 2023-03-15T18:37:17.337770 | 2020-11-18T06:46:06 | 2020-11-18T06:46:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | x = 1
y = 'a'
print "asdfasfd %s,%s" % (x, y)
| [
"[email protected]"
] | |
d12b7c803c0ee26c17895955b0173ae850f57ec0 | 96933e173dcebcd611188b6fba982ca9cf975e1c | /qa/migrations/0007_remove_question_tags.py | fc3e9673ea51d72cfd2f1b2f275a7f9e3423e08b | [] | no_license | hift/django-qa | 7d640181312c672936a6f0b7fa2f8041350a0e4f | 57bc418e0b9f611872b50968862dd469353cb050 | refs/heads/master | 2021-01-10T10:41:16.437404 | 2015-11-22T07:54:48 | 2015-11-22T07:54:48 | 45,435,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('qa', '0006_question_tags'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='tags',
),
]
| [
"="
] | = |
f4d577e2f19b9ff16f9ff16c9b44e3f73ed349cd | 0facb323be8a76bb4c168641309972fa77cbecf2 | /Configurations/HWWSemiLepHighMass/nanoAODv4v5/2016/Mix/nuisances.py | 9e45ef184499d676695781cd4842074beab43449 | [] | no_license | bhoh/SNuAnalytics | ef0a1ba9fa0d682834672a831739dfcfa1e7486b | 34d1fc062e212da152faa83be50561600819df0e | refs/heads/master | 2023-07-06T03:23:45.343449 | 2023-06-26T12:18:28 | 2023-06-26T12:18:28 | 242,880,298 | 0 | 1 | null | 2020-02-25T01:17:50 | 2020-02-25T01:17:49 | null | UTF-8 | Python | false | false | 6,767 | py | import os
SITE=os.uname()[1]
xrootdPath=''
if 'iihe' in SITE :
xrootdPath = 'dcap://maite.iihe.ac.be/'
treeBaseDir = '/pnfs/iihe/cms/store/user/xjanssen/HWW2015/'
elif 'cern' in SITE :
treeBaseDir = '/eos/cms/store/group/phys_higgs/cmshww/amassiro/HWWNano/'
elif 'sdfarm' in SITE:
xrootdPath = 'root://cms-xrdr.private.lo:2094'
treeBaseDir = "/xrootd/store/user/jhchoi/Latino/HWWNano/"
eleWP='mva_90p_Iso2016'
muWP='cut_Tight80x'
mc = [skey for skey in samples if skey != 'DATA']
nuisances['lumi_Uncorrelated'] = {
'name': 'lumi_13TeV_2016',
'type': 'lnN',
'samples': dict((skey, '1.022') for skey in mc )
}
nuisances['lumi_XYFact'] = {
'name': 'lumi_13TeV_XYFact',
'type': 'lnN',
'samples': dict((skey, '1.009') for skey in mc)
}
nuisances['lumi_BBDefl'] = {
'name': 'lumi_13TeV_BBDefl',
'type': 'lnN',
'samples': dict((skey, '1.004') for skey in mc )
}
nuisances['lumi_DynBeta'] = {
'name': 'lumi_13TeV_DynBeta',
'type': 'lnN',
'samples': dict((skey, '1.005') for skey in mc )
}
nuisances['lumi_Ghosts'] = {
'name': 'lumi_13TeV_Ghosts',
'type': 'lnN',
'samples': dict((skey, '1.004') for skey in mc )
}
for shift in ['jes', 'lf', 'hf', 'hfstats1', 'hfstats2', 'lfstats1', 'lfstats2', 'cferr1', 'cferr2']:
btag_syst = ['(btagSF%sup)/(btagSF)' % shift, '(btagSF%sdown)/(btagSF)' % shift]
name = 'CMS_btag_%s' % shift
if 'stats' in shift:
name += '_2016'
nuisances['btag_shape_%s' % shift] = {
'name': name,
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, btag_syst) for skey in mc),
}
trig_syst = ['TriggerEffWeight_1l_u/TriggerEffWeight_1l','TriggerEffWeight_1l_d/TriggerEffWeight_1l']
nuisances['trigg'] = {
'name': 'CMS_eff_hwwtrigger_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, trig_syst) for skey in mc),
}
prefire_syst = ['PrefireWeight_Up/PrefireWeight', 'PrefireWeight_Down/PrefireWeight']
nuisances['prefire'] = {
'name': 'CMS_eff_prefiring_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, prefire_syst) for skey in mc),
}
eff_e_syst = ['Lepton_tightElectron_'+eleWP+'_TotSF_Up'+'[0]/Lepton_tightElectron_'+eleWP+'_TotSF'+'[0]','Lepton_tightElectron_'+eleWP+'_TotSF_Down'+'[0]/Lepton_tightElectron_'+eleWP+'_TotSF'+'[0]']
nuisances['eff_e'] = {
'name': 'CMS_eff_e_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, eff_e_syst) for skey in mc),
}
#MCl1loose2016v5__MCCorr2016v5__METup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSel
nuisances['electronpt'] = {
'name': 'CMS_scale_e_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__ElepTup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__ElepTdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
eff_m_syst = ['Lepton_tightMuon_'+muWP+'_TotSF_Up'+'[0]/Lepton_tightMuon_'+muWP+'_TotSF'+'[0]','Lepton_tightMuon_'+muWP+'_TotSF_Down'+'[0]/Lepton_tightMuon_'+muWP+'_TotSF'+'[0]']
nuisances['eff_m'] = {
'name': 'CMS_eff_m_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, eff_m_syst) for skey in mc),
}
nuisances['muonpt'] = {
'name': 'CMS_scale_m_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__MupTup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__MupTdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['jes'] = {
'name': 'CMS_scale_j_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__JESup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__JESdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['fatjes'] = {
'name': 'CMS_scale_fatj_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMass_up__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMass_do__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['fatjer'] = {
'name': 'CMS_scale_fatjres_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMassRes_up__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__Semilep2016_whad30__CorrFatJetMass__FatJetMassRes_do__HMlnjjSelBWR/',
#'AsLnN': '1'
}
nuisances['met'] = {
'name': 'CMS_scale_met_2016',
'kind': 'tree',
'type': 'shape',
'samples': dict((skey, ['1', '1']) for skey in mc),
'folderUp': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__METup__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
'folderDown': xrootdPath+'/'+treeBaseDir+'/Summer16_102X_nAODv4_Full2016v5/MCl1loose2016v5__MCCorr2016v5__METdo__Semilep2016_whad30__CorrFatJetMass__HMlnjjSelBWR/',
#'AsLnN': '1'
}
pu_syst=['puWeightUp/puWeight','puWeightDown/puWeight']
nuisances['PU'] = {
'name': 'CMS_PU_2016',
'kind': 'weight',
'type': 'shape',
'samples': dict((skey, pu_syst) for skey in mc),
#'AsLnN': '1',
}
'''
ps_syst=['PSWeight[0]', 'PSWeight[1]', 'PSWeight[2]', 'PSWeight[3]']
nuisances['PS'] = {
'name': 'PS',
'type': 'shape',
'kind': 'weight_envelope',
'samples':dict((skey, ps_syst) for skey in mc),
#'AsLnN': '1'
}
'''
tau21_syst=['tau21SFup','tau21SFdown']
nuisances['tau21'] = {
'name': 'CMS_eff_vtag_tau21_sf_13TeV',
'type': 'lnN',
'samples': dict((skey, '1.04') for skey in mc )
#'Samples': dict((skey, tau21_syst) for skey in mc )
}
| [
"[email protected]"
] | |
b2ee99ab61869de3e0076e3216e7a06574b7fbc5 | 75e1d9446cb1fca5c6a79ad0ba7f38268df1161f | /Python Programs/both-adjacent-elements-odd-or-even.py | cc2b13fa70a753c1820a7d007e5bebba5611bfd7 | [
"CC0-1.0"
] | permissive | muhammad-masood-ur-rehman/Skillrack | 6e9b6d93680dfef6f40783f02ded8a0d4283c98a | 71a25417c89d0efab40ee6229ccd758b26ae4312 | refs/heads/main | 2023-02-03T16:45:54.462561 | 2020-12-23T08:36:28 | 2020-12-23T08:36:28 | 324,221,340 | 4 | 1 | CC0-1.0 | 2020-12-24T19:12:54 | 2020-12-24T19:12:54 | null | UTF-8 | Python | false | false | 953 | py | Both Adjacent Elements - Odd or Even
Both Adjacent Elements - Odd or Even: Given an array of N positive integers, print the positive integers that have both the adjacent element values as odd or even.
Boundary Condition(s):
3 <= N <= 1000
Input Format:
The first line contains N.
The second line contains N elements separated by space(s).
Output Format:
The first line contains the elements (which have both the adjacent element values as odd or even) separated by a space.
Example Input/Output 1:
Input:
7
10 21 20 33 98 66 29
Output:
21 20 33
Example Input/Output 2:
Input:
5
11 21 30 99 52
Output:
30 99
# Solution 1: keep element i when its two neighbours share the same parity
# (odd%2 == 1, even%2 == 0, so a single comparison covers both cases).
n = int(input())
values = list(map(int, input().split()))
for i in range(1, len(values) - 1):
    if values[i - 1] % 2 == values[i + 1] % 2:
        print(values[i], end=' ')
# Solution 2 (duplicate submission kept verbatim in intent): same logic,
# with the loop bounded by the declared count instead of the list length.
a = int(input())
values = list(map(int, input().split()))
for i in range(1, a - 1):
    if values[i - 1] % 2 == values[i + 1] % 2:
        print(values[i], end=' ')
| [
"[email protected]"
] | |
226db6ef29278e9c5bed42cffc9f0ecef5632813 | 53c224a6eee8c6869bc5c292cc8783ea934f0656 | /data_generator.py | b0417b0ce56cec970cf6c200a3ac5465138a59c4 | [
"MIT"
] | permissive | kunato/Deep-Image-Matting | 05909d276dd86cc3d59eacf1865511375d6b3f54 | 84baf4ce893083a940d9bfe224515f09787e9289 | refs/heads/master | 2020-05-22T18:35:43.610713 | 2019-05-14T15:44:34 | 2019-05-14T15:44:34 | 186,475,076 | 0 | 0 | MIT | 2019-05-13T18:33:15 | 2019-05-13T18:33:14 | null | UTF-8 | Python | false | false | 5,986 | py | import math
import os
import random
from random import shuffle
import cv2 as cv
import numpy as np
from keras.utils import Sequence
from config import batch_size
from config import fg_path, bg_path, a_path
from config import img_cols, img_rows
from config import unknown_code
from utils import safe_crop
# 3x3 elliptical structuring element used by generate_trimap to dilate the
# non-zero alpha region into an "unknown" band.
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
# Foreground and background file-name lists for the train and test splits,
# one file name per line.
with open('Combined_Dataset/Training_set/training_fg_names.txt') as f:
    fg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_fg_names.txt') as f:
    fg_test_files = f.read().splitlines()
with open('Combined_Dataset/Training_set/training_bg_names.txt') as f:
    bg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_bg_names.txt') as f:
    bg_test_files = f.read().splitlines()
def get_alpha(name):
    """Load the grayscale training alpha mask for a composite sample.

    The digits before the first underscore in ``name`` are the foreground
    index, which selects the mask file name from ``fg_files``.
    """
    fg_index = int(name.split("_")[0])
    mask_path = os.path.join('data/mask', fg_files[fg_index])
    return cv.imread(mask_path, 0)  # flag 0 -> single-channel grayscale
def get_alpha_test(name):
    """Load the grayscale test-split alpha mask for a composite sample.

    The digits before the first underscore in ``name`` are the foreground
    index, which selects the mask file name from ``fg_test_files``.
    """
    fg_index = int(name.split("_")[0])
    mask_path = os.path.join('data/mask_test', fg_test_files[fg_index])
    return cv.imread(mask_path, 0)  # flag 0 -> single-channel grayscale
def composite4(fg, bg, a, w, h):
    """Composite a foreground over a random (w, h) crop of the background.

    Parameters
    ----------
    fg : foreground image, shape (h, w, 3)
    bg : background image, at least (h, w) in each dimension
    a : alpha matte with values in [0, 255], shape (h, w)
    w, h : width and height of the composite

    Returns
    -------
    tuple
        ``(im, a, fg, bg)`` where ``im`` is the uint8 composite and
        ``fg`` / ``bg`` are the float32 foreground and cropped background.
    """
    fg = np.array(fg, np.float32)
    bg_h, bg_w = bg.shape[:2]
    # Random crop origin; stays at (0, 0) when bg is exactly (h, w).
    x = np.random.randint(0, bg_w - w) if bg_w > w else 0
    y = np.random.randint(0, bg_h - h) if bg_h > h else 0
    bg = np.array(bg[y:y + h, x:x + w], np.float32)
    # Normalised alpha with a trailing channel axis so it broadcasts
    # against the 3-channel images.
    alpha = np.zeros((h, w, 1), np.float32)
    alpha[:, :, 0] = a / 255.
    composite = (alpha * fg + (1 - alpha) * bg).astype(np.uint8)
    return composite, a, fg, bg
def process(im_name, bg_name):
    """Load a foreground/alpha pair and composite it over a background.

    When the background is smaller than the foreground in either
    dimension it is upscaled (bicubic) just enough to cover it, so the
    random crop inside composite4 always has enough pixels.
    """
    im = cv.imread(fg_path + im_name)
    a = cv.imread(a_path + im_name, 0)
    h, w = im.shape[:2]
    bg = cv.imread(bg_path + bg_name)
    bh, bw = bg.shape[:2]
    wratio = w / bw
    hratio = h / bh
    # Scale by the larger of the two ratios so both dimensions are covered.
    ratio = wratio if wratio > hratio else hratio
    if ratio > 1:
        bg = cv.resize(src=bg, dsize=(math.ceil(bw * ratio), math.ceil(bh * ratio)), interpolation=cv.INTER_CUBIC)
    return composite4(im, bg, a, w, h)
def generate_trimap(alpha):
    """Derive a trimap (0 = background, 128 = unknown, 255 = foreground).

    The unknown band is produced by dilating the non-zero alpha region a
    random number of iterations, so the band width varies per sample.
    """
    fg = np.array(np.equal(alpha, 255).astype(np.float32))
    # fg = cv.erode(fg, kernel, iterations=np.random.randint(1, 3))
    unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
    unknown = cv.dilate(unknown, kernel, iterations=np.random.randint(1, 20))
    # Pixels that are non-zero but not pure foreground become 128.
    trimap = fg * 255 + (unknown - fg) * 128
    return trimap.astype(np.uint8)
# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions.
def random_choice(trimap, crop_size=(320, 320)):
    """Pick the top-left corner of a crop centred on a random unknown pixel.

    Returns (0, 0) when the trimap has no pixels equal to ``unknown_code``;
    otherwise the corner is clamped so neither coordinate goes negative.
    """
    crop_height, crop_width = crop_size
    ys, xs = np.where(trimap == unknown_code)
    if len(ys) == 0:
        return 0, 0
    pick = np.random.choice(range(len(ys)))
    x = max(0, xs[pick] - int(crop_width / 2))
    y = max(0, ys[pick] - int(crop_height / 2))
    return x, y
class DataGenSequence(Sequence):
    """Keras Sequence yielding matting batches composed on the fly.

    Each item returns ``batch_x`` (RGB + trimap, 4 channels) and
    ``batch_y`` (alpha + unknown-region mask, 2 channels), both scaled to
    [0, 1] and sized (img_rows, img_cols).
    """
    def __init__(self, usage):
        # usage is 'train' or 'valid'; it selects '<usage>_names.txt'.
        self.usage = usage
        filename = '{}_names.txt'.format(usage)
        with open(filename, 'r') as f:
            self.names = f.read().splitlines()
        np.random.shuffle(self.names)
    def __len__(self):
        # Number of batches per epoch; the last batch may be short.
        return int(np.ceil(len(self.names) / float(batch_size)))
    def __getitem__(self, idx):
        """Build and return batch ``idx`` as (batch_x, batch_y)."""
        i = idx * batch_size
        length = min(batch_size, (len(self.names) - i))
        batch_x = np.empty((length, img_rows, img_cols, 4), dtype=np.float32)
        batch_y = np.empty((length, img_rows, img_cols, 2), dtype=np.float32)
        for i_batch in range(length):
            # Sample names look like '<fg_index>_<bg_index>.png'.
            name = self.names[i]
            fcount = int(name.split('.')[0].split('_')[0])
            bcount = int(name.split('.')[0].split('_')[1])
            im_name = fg_files[fcount]
            bg_name = bg_files[bcount]
            image, alpha, fg, bg = process(im_name, bg_name)
            # crop size 320:640:480 = 1:1:1
            different_sizes = [(320, 320), (480, 480), (640, 640)]
            crop_size = random.choice(different_sizes)
            trimap = generate_trimap(alpha)
            # Crop around a random unknown-region pixel of the trimap.
            x, y = random_choice(trimap, crop_size)
            image = safe_crop(image, x, y, crop_size)
            alpha = safe_crop(alpha, x, y, crop_size)
            # Regenerate the trimap for the cropped alpha.
            trimap = generate_trimap(alpha)
            # Flip array left to right randomly (prob=1:1)
            if np.random.random_sample() > 0.5:
                image = np.fliplr(image)
                trimap = np.fliplr(trimap)
                alpha = np.fliplr(alpha)
            batch_x[i_batch, :, :, 0:3] = image / 255.
            batch_x[i_batch, :, :, 3] = trimap / 255.
            # Loss mask: 1 only on unknown (value 128) trimap pixels.
            mask = np.equal(trimap, 128).astype(np.float32)
            batch_y[i_batch, :, :, 0] = alpha / 255.
            batch_y[i_batch, :, :, 1] = mask
            i += 1
        return batch_x, batch_y
    def on_epoch_end(self):
        # Reshuffle the sample order between epochs.
        np.random.shuffle(self.names)
def train_gen():
    # Sequence over the sample names listed in train_names.txt.
    return DataGenSequence('train')
def valid_gen():
    # Sequence over the sample names listed in valid_names.txt.
    return DataGenSequence('valid')
def shuffle_data():
    """Create the train/validation split files for the composed dataset.

    Builds one sample name per (foreground, background) pair --
    ``<fg_index>_<bg_index>.png`` -- draws ``num_valid_samples`` of them
    (taken from ``config``) for validation, and writes the two shuffled
    name lists to ``valid_names.txt`` and ``train_names.txt``.
    """
    # Imported up front: the original shadowed a dead local constant
    # (8620) with this import halfway through the function.
    from config import num_valid_samples

    num_fgs = 431
    num_bgs_per_fg = 100
    names = []
    bcount = 0
    for fcount in range(num_fgs):
        for _ in range(num_bgs_per_fg):
            names.append('{}_{}.png'.format(fcount, bcount))
            bcount += 1

    valid_names = random.sample(names, num_valid_samples)
    # Set membership is O(1); the original scanned the valid list for
    # every name, which is quadratic over ~43k samples.
    valid_set = set(valid_names)
    train_names = [n for n in names if n not in valid_set]
    shuffle(valid_names)
    shuffle(train_names)

    with open('valid_names.txt', 'w') as file:
        file.write('\n'.join(valid_names))
    with open('train_names.txt', 'w') as file:
        file.write('\n'.join(train_names))
if __name__ == '__main__':
    # Quick manual smoke test: print the dimensions of one merged image.
    filename = 'merged/357_35748.png'
    bgr_img = cv.imread(filename)
    bg_h, bg_w = bgr_img.shape[:2]
    print(bg_w, bg_h)
| [
"[email protected]"
] | |
2e3c056ddb9c2a6b10f4f8034e24097ff42c81da | 2dfbb97b47fd467f29ffb26faf9a9f6f117abeee | /leetcode/1191.py | 9521fa28400a3f61456c8bb3b23adb9a49256601 | [] | no_license | liuweilin17/algorithm | 0e04b2d36dfb6b7b1b0e0425daf69b62273c54b5 | d3e8669f932fc2e22711e8b7590d3365d020e189 | refs/heads/master | 2020-12-30T11:03:40.085105 | 2020-04-10T03:46:01 | 2020-04-10T03:46:01 | 98,844,919 | 3 | 1 | null | 2018-10-05T03:01:02 | 2017-07-31T03:35:14 | C++ | UTF-8 | Python | false | false | 1,534 | py | ###########################################
# Let's Have Some Fun
# File Name: 1191.py
# Author: Weilin Liu
# Mail: [email protected]
# Created Time: Sun Sep 15 11:17:08 2019
###########################################
#coding=utf-8
#!/usr/bin/python
#1191. K-Concatenation Maximum Sum
class Solution:
    """LeetCode 1191: K-Concatenation Maximum Sum."""

    def kConcatenationMaxSum(self, arr: List[int], k: int) -> int:
        """Return the max subarray sum of ``arr`` repeated ``k`` times.

        Kadane's algorithm over one copy (k == 1) or two concatenated
        copies finds any answer that spans at most one copy boundary in
        O(n).  When ``sum(arr) > 0`` and ``k > 2``, the optimum may also
        take k - 2 whole middle copies plus the best suffix/prefix of the
        outer copies.  The empty subarray (sum 0) is allowed, and the
        result is reported modulo 10**9 + 7.
        """
        n = len(arr)
        if n < 1 or k < 1:
            return 0
        # Kadane over one or two copies of arr.
        max_so_far = 0
        new_arr = arr if k == 1 else arr * 2
        max_end_here = 0
        for a in new_arr:
            max_end_here = max(a, a + max_end_here)
            max_so_far = max(max_end_here, max_so_far)
        sum_v = sum(arr)
        if sum_v > 0 and k > 2:
            # k * sum minus the smallest prefix sum and smallest postfix
            # sum: equivalent to keeping every middle copy whole and
            # trimming only the first and last copies.
            min_pre = 0
            t = 0
            for i in range(n):
                t += arr[i]
                min_pre = min(min_pre, t)
            min_post = 0
            t = 0
            for i in range(n - 1, -1, -1):
                t += arr[i]
                min_post = min(min_post, t)
            max_so_far = max(max_so_far, sum_v * k - min_pre - min_post)
        return max_so_far % (10 ** 9 + 7)
| [
"[email protected]"
] | |
6366a51b34c9707afc49632e677013a815ca55db | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /jSjjhzRg5MvTRPabx_19.py | eb3db41a4579e7f71b9bfce3b4dbb68b86841701 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | """
Given a list of strings (nouns), list them up in a complete sentence.
### Examples
sentence(["orange", "apple", "pear"]) ➞ "An orange, an apple and a pear."
sentence(["keyboard", "mouse"]) ➞ "A keyboard and a mouse."
sentence(["car", "plane", "truck", "boat"]) ➞ "A car, a plane, a truck and a boat."
### Notes
* The sentence starts with a **capital letter**.
* Do not change **the order** of the words.
* **A/An** should be correct in all places.
* Put commas between nouns, except between the last two (there you put "and").
* The sentence ends with a `.`
* There are at least two nouns given.
* Every given word is lowercase.
"""
def sentence(nouns):
    """Join lowercase nouns into one sentence with correct articles.

    Each noun receives "a"/"an" according to its first letter, the items
    are comma-separated except the final pair (joined by "and"), the
    sentence starts with a capital letter and ends with a period.  At
    least two nouns are expected.
    """
    with_articles = []
    for noun in nouns:
        article = "an " if noun[0] in "aeiou" else "a "
        with_articles.append(article + noun)
    last = "and " + with_articles[-1] + "."
    # Capitalise by stripping the first article's leading "a" and
    # prepending "A": "a car" -> "A car", "an owl" -> "An owl".
    with_articles[0] = with_articles[0][1:]
    return "A" + ", ".join(with_articles[:-1]) + " " + last
| [
"[email protected]"
] | |
133ecc63c3b8010b2e081e25503fe33369029499 | f9a5e7233875989f994438ce267907d8210d60a1 | /test/pump_sensor/metalearning/knn_ranking/RMSE/k=5/univariate_statistical_test_F-test/sensor_prediction_F-test_AUCROC.py | 07f3ad3de4c6f7b7527d340a0b8f360e9da3c1b9 | [] | no_license | renoslyssiotis/When-are-Machine-learning-models-required-and-when-is-Statistics-enough | da8d53d44a69f4620954a32af3aacca45e1ed641 | 6af1670a74345f509c86b7bdb4aa0761c5b058ff | refs/heads/master | 2022-08-29T20:21:57.553737 | 2020-05-26T18:03:46 | 2020-05-26T18:03:46 | 256,439,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | import sys, os, pickle
from pathlib import PurePath
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
# Make the k=5 KNN ranking implementation importable from the repo root.
sys.path.append(str(p.parents[7])+'/metalearners/knn_ranking_method/RMSE')
from KNN_ranking_k_5_RMSE import KNN_ranking
#Load the selected meta-dataset after performing zero-variance threshold
with open(str(p.parents[7])+'/analysis/feature_selection/univariate_selection/ANOVA_X_f1_202.pickle', 'rb') as handle:
    metadataset_feature_selected = pickle.load(handle)
#=====================META-FEATURE EXTRACTION==================================
with open(str(p.parents[5])+'/actual/sensor_metafeatures_202.pickle', 'rb') as handle:
    meta_features = pickle.load(handle)
#nested_results is a nested dictionary with all the AUC-ROC performances for each dataset and all models
with open(str(p.parents[6])+'/nested_results_roc.pickle', 'rb') as handle:
    nested_results_roc = pickle.load(handle)
"""
Remove the meta-features which are not in the meta-dataset
(i.e. the features which have not been selected in the feature selection process)
"""
metafeatures_to_be_removed = []
for metafeature in meta_features.keys():
    if metafeature in metadataset_feature_selected.columns:
        pass
    else:
        metafeatures_to_be_removed.append(metafeature)
# NOTE(review): list comprehension used purely for its pop() side effect.
[meta_features.pop(key) for key in metafeatures_to_be_removed]
#========================META-LEARNING: RANKING================================
#KNN Ranking Method
top1, top2, top3 = KNN_ranking(metadataset_feature_selected, meta_features, nested_results_roc)
print("==========================================")
print("                 AUC-ROC                  ")
print("==========================================")
print("Top 1 predicted model: " + top1)
print("Top 2 predicted model: " + top2)
print("Top 3 predicted model: " + top3)
#Actual results
with open(str(p.parents[5])+'/actual/sensor_top_3_roc.pickle', 'rb') as handle:
    actual_results = pickle.load(handle)
print("==========================================")
print("Top 1 ACTUAL model: " + actual_results[0])
print("Top 2 ACTUAL model: " + actual_results[1])
print("Top 3 ACTUAL model: " + actual_results[2]) | [
"[email protected]"
] | |
ccdaa4456177883986864fd3be8c8e5ff907ebe3 | 5e944167564f1c85431b2244cb9181a058b0ceeb | /homework1/exercise1.py | cdcc8f97bf7ceb91f7685291401cf5dc92596cd6 | [] | no_license | uwhpsc-2016/homework1_solution | 4519ddec35e29b0b15561cd5b066a593edb3c499 | d2b68d2c6aaf6a84d34405ec3b352a6ecc6c346c | refs/heads/master | 2020-12-26T21:37:31.910378 | 2016-05-05T02:58:46 | 2016-05-05T02:58:46 | 55,077,710 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 804 | py | def collatz_step(n):
"""Returns the result of the Collatz function.
The Collatz function C : N -> N is used in `collatz` to
generate collatz sequences.
Parameters
----------
n : int
Returns
-------
int
"""
if (n < 1):
raise ValueError('n must be >= 1')
if (n == 1):
return 1
if (n % 2 == 0):
return n/2
elif (n % 2 == 1):
return 3*n + 1
def collatz(n):
    """Return the Collatz sequence beginning with `n`.

    It is conjectured that Collatz sequences all end with `1`.

    Parameters
    ----------
    n : int

    Returns
    -------
    sequence : list
        A Collatz sequence.
    """
    sequence = [n]
    # Keep stepping from the most recent value until we hit 1.
    while sequence[-1] > 1:
        sequence.append(collatz_step(sequence[-1]))
    return sequence
| [
"[email protected]"
] | |
6ff1497c503be08c386828ec59da8a6dcd17b03b | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLX/_glgets.py | 76bcbaca63d3763cb619e8e125e243064e9565cc | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 279 | py | """glGet* auto-generation of output arrays (DO NOT EDIT, AUTOGENERATED)"""
try:
    from OpenGL.raw.GL._lookupint import LookupInt as _L
except ImportError:
    # Fallback stub: without the core lookup helper any attempt to use
    # _L should fail loudly rather than silently return a wrong size.
    def _L(*args):
        raise RuntimeError( "Need to define a lookupint for this api" )
# Maps glGet* enums to their output array sizes; empty in this module.
_glget_size_mapping = _m = {}
| [
"[email protected]"
] | |
683a3f3492255066d1463b56418bfee089e6a0e2 | 779be2cae4bcfa24d47c2f681dd77bd2419099e9 | /atcoder_py/Archive/ABC204_A_20210606.py | b1a175022a1720a474395bc6e55552573ed9b8db | [] | no_license | ynanigashi/til | f72d586427981c5b4543546193f8bfcb0d65a8c8 | 9de4453300ea5b084f4348a3e25ccedb0627d10c | refs/heads/master | 2023-08-10T18:30:44.599474 | 2021-09-26T12:50:39 | 2021-09-26T12:50:39 | 274,137,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | x, y = map(int, input().split())
# If the two values agree, print that value.
if x == y:
    print(x)
else:
    # Otherwise print the remaining third value; assumes x, y are drawn
    # from {0, 1, 2}, where 0 + 1 + 2 == 3 -- TODO confirm input domain.
    print(3 - x -y)
"[email protected]"
] | |
5a8684e4aee28c9d8a04f66bf08b0763bd885b1b | d04f2c6d22ec189cd725cf2e7c882e841cbada67 | /nonlineer-3.py | 5098029431ddac53df11179b3f63fffc6f3d2471 | [
"Unlicense"
] | permissive | nyucel/numerical-methods | e2d0c13b7ae752da4d765bc76a04499ad998da6f | 14824fa3b85b4337b9c95c0b79b2b91a644ac18d | refs/heads/master | 2021-12-15T11:55:12.250619 | 2018-04-17T18:58:21 | 2018-04-17T18:58:21 | 82,589,234 | 52 | 83 | Unlicense | 2023-08-15T22:28:25 | 2017-02-20T18:28:56 | Python | UTF-8 | Python | false | false | 254 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
def f(x, y):
    """First residual of the nonlinear system: f(x, y) = x**2 + x*y - 10."""
    return x ** 2 + x * y - 10
def g(x, y):
    """Second residual of the nonlinear system: g(x, y) = y + 3*x*y**2 - 57."""
    return y + 3 * x * y ** 2 - 57
# Read initial guesses for x and y (prompts are in Turkish: "enter the
# starting value for x/y"); the script currently only echoes them back.
xi = float(input("x için başlangıç değerini girin: "))
yi = float(input("y için başlangıç değerini girin: "))
print(xi,yi)
| [
"[email protected]"
] | |
5123cbb3e967f205b9a8fe82e3a467da31dd9ff5 | 1508b3e3f56e750e38db4334343beedcbb2f9c95 | /519/client.py | 20c724b5ee583cac67e228e1cc2fa1481b071e6d | [] | no_license | kellyseeme/pythonexample | 3bb325e31c677160c1abd6c3f314f7ef3af55daa | 3eab43cdfa5c59a0f4553de84c9de21e5ded44bb | refs/heads/master | 2021-01-21T13:52:43.076697 | 2016-05-30T06:32:37 | 2016-05-30T06:32:37 | 51,348,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | #!/usr/bin/env python
import socket
import time
import processbar
import time
HOST = '192.168.1.60'
PORT = 9999
def recv_all(socketobj,file_name,file_size):
    """Receive exactly ``file_size`` bytes from ``socketobj`` into ``file_name``.

    Fix over the original: ``recv(1024)`` may legally return fewer than
    1024 bytes, so the remaining count is reduced by the number of bytes
    actually received instead of a fixed 1024, and an empty read (peer
    closed the connection) ends the loop instead of spinning forever.
    A ten-segment progress bar tracks the cumulative amount downloaded.
    """
    f = open(file_name,'w')
    remaining = file_size
    while remaining > 0:
        data = socketobj.recv(min(remaining, 1024))
        if not data:
            # Connection closed before the full payload arrived.
            break
        f.write(data)
        remaining -= len(data)
        # Segments filled in proportion to bytes received so far.
        processbar.progressbar((file_size - remaining) * 10 // file_size, 10)
    f.close()
# Interactive client loop (Python 2 syntax: raw_input / print statement).
# Commands are '<put|get> <file_name>'; empty input or 'exit' quits.
s = socket.socket()
s.connect((HOST,PORT))
while True:
    commands = raw_input('>>>')
    if commands == 'exit' or not commands:break
    s.sendall(commands)
    options = commands.strip().split(' ')
    if len(options) == 2:
        file_name = options[1]
        if options[0] == 'put':
            # Upload: send the payload length first, then the payload;
            # the sleeps keep the two sends in separate packets.
            f = open(file_name)
            data = f.read()
            time.sleep(0.2)
            s.send(str(len(data)))
            time.sleep(0.2)
            s.send(data)
            print s.recv(1024)
        elif options[0] == 'get':
            # Download: the server sends the size first, then the body.
            file_size = int(s.recv(1024))
            recv_all(s,file_name,file_size)
            print s.recv(1024)
        else:
            pass
| [
"root@python.(none)"
] | root@python.(none) |
1356e86996b557a0fb21231df0e57fbd65351d5c | f99cca94f74c69bc518e298c14140534e18eabd3 | /OrcApi/Run/Test/TestServiceRun.py | 9c2a51fb2de8a7362bf3f8d3d40beca6dcf424bc | [] | no_license | pubselenium/OrcTestToolsKit | d6d838d9937d2c4d86941e317cb3ff096b58e52d | f3ccbbceaed4f4996f6907a2f4880c2fd3f82bbb | refs/heads/master | 2021-04-29T05:15:53.240714 | 2016-12-30T09:42:53 | 2016-12-30T09:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 704 | py | import unittest
from OrcLib.LibTest import OrcTest
from OrcApi.Run.RunDefMod import RunCore
class TestService(unittest.TestCase):
    # Integration smoke tests for RunCore: each searches a run list for a
    # node id and saves the result as an XML file in the working directory.
    def test_get_case_list(self):
        """
        Search the run list for case id 2000000001 and save it as XML.
        :return:
        """
        OrcTest.test_print_begin()
        _service = RunCore()
        _service.search_list("CASE", 2000000001)
        _service.save_list("./ccc.xml")
        OrcTest.test_print_end()
    def test_get_batch_list(self):
        """
        Search the run list for batch id 1000000008 and save it as XML.
        :return:
        """
        OrcTest.test_print_begin()
        _service = RunCore()
        _service.search_list("batch", 1000000008)
        _service.save_list("./abc.xml")
        OrcTest.test_print_end()
"[email protected]"
] | |
3313864acba61b42751b05e32ea6f94bb50c4c20 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Default/FPythonCode/ColumnDefinitionUtils.py | d2f8e37da770a652927cd264b31a059f22ee2a8e | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py |
def GetPairOffParentReference(settlement):
    """Return the first pair-off parent found on ``settlement`` or, failing
    that, anywhere in its child settlement tree (depth-first search).

    Returns None when no settlement in the tree has a pair-off parent.
    """
    if settlement.PairOffParent():
        return settlement.PairOffParent()
    for child in settlement.Children():
        childResult = GetPairOffParentReference(child)
        if childResult:
            return childResult
    return None
| [
"[email protected]"
] | |
0da214ab4195098228be3d27bdd5023c72c5940a | b9de33c6fb310ef69cba728b9de1a31165c3a031 | /chapter_32/class-gotchas-super-multiple-inheritance.py | e2a2df1637b44a0d8209f30663b0da3f7c77b3d2 | [] | no_license | bimri/learning-python | 2fc8c0be304d360b35020a0dfc16779f78fb6848 | 5f2fcc9a08f14e1d848530f84ce3b523d1f72aad | refs/heads/master | 2023-08-12T20:30:09.754468 | 2021-10-15T20:53:49 | 2021-10-15T20:53:49 | 377,515,946 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,261 | py | "Multiple Inheritance: Order Matters"
# Python always searches superclasses from left to right, according to their order in the header line.
# class ListTree:
# def __str__(self): ...
# class Super:
# def __str__(self): ...
# class Sub(ListTree, Super): # Get ListTree's __str__ by listing it first
# x = Sub() # Inheritance searches ListTree before Super
"""
But now suppose Super and ListTree have their own versions of other same-named
attributes, too. If we want one name from Super and another from ListTree, the order
in which we list them in the class header won’t help—we will have to override inheritance
by manually assigning to the attribute name in the Sub class:
"""
# class ListTree:
# def __str__(self): ...
# def other(self): ...
# class Super:
# def __str__(self): ...
# def other(self): ...
# class Sub(ListTree, Super): # Get ListTree's __str__ by listing it first
# other = Super.other # But explicitly pick Super's version of other
# def __init__(self):
# ...
# x = Sub() # Inheritance searches Sub before ListTree/Super
"""
Here, the assignment to other within the Sub class creates Sub.other—a reference back
to the Super.other object. Because it is lower in the tree, Sub.other effectively hides
ListTree.other, the attribute that the inheritance search would normally find. Similarly,
if we listed Super first in the class header to pick up its other, we would need to
select ListTree’s method explicitly:
"""
# class Sub(Super, ListTree): # Get Super's other by order
# __str__ = Lister.__str__ # Explicitly pick Lister.__str__
"Scopes in Methods and Classes"
# A class statement is just an assignment in the enclosing function's
# local scope, so methods can still reach the class name via the LEGB
# rule's enclosing (E) layer.
def generate():
    class Spam: # Spam is a name in generate's local scope
        count = 1
        def method(self):
            print(Spam.count) # Visible in generate's scope, per LEGB rule (E)
    return Spam()
generate().method()
# Same effect with Spam defined at module level instead: generate()
# resolves Spam through the global scope at call time, so defining Spam
# after generate (but before the call) works fine.
def generate():
    return Spam()
class Spam(): # Define at top level of module
    count = 1
    def method(self):
        print(Spam.count) # Visible in module, per LEGB rule (E)
generate().method()
def generate(label): # Returns a class instead of instance
    # Class factory: `label` is captured from generate's scope by the
    # method, so every class produced remembers its own label.
    class Spam: # Defined in generate's local scope (not module scope)
        count = 1
        def method(self):
            print("%s=%s" % (label, Spam.count))
    return Spam
if __name__ == "__main__":
    # Exercise the class factory only when run as a script.
    aClass = generate("Gotchas") # Generate a class
    I = aClass() # Create an instance of the class
    I.method() # Call the method
| [
"[email protected]"
] | |
1ad0170d04bbb39c701cdfaf7bd3851389d77a8f | 149a3d107891201582653d00cce23678bea91c59 | /tipagem.py | 619b4d71a6997a7fd87f130c2b30cb39bd03b529 | [] | no_license | LondonComputadores/dio_es6_essentials_recap_jul19 | 8dfd25c8e2bcf1efed4f1f7f8b26481e82eefdde | c440161cae8c2e919f5bd9912110bbd8e1d920c5 | refs/heads/master | 2020-06-19T16:22:34.391088 | 2019-07-16T00:38:06 | 2019-07-16T00:38:06 | 196,781,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | meuNumero = 20
meuTexto = "String"
# Error is on purpose just for comparison
# NOTE: this raises TypeError at runtime -- Python will not implicitly
# convert an int for string concatenation (unlike loosely-typed JS).
print(meuNumero + " " + meuTexto)
"[email protected]"
] | |
313570f597eb15ae6444830bf79aed976b250e96 | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_video/mmaction2/mmaction/models/localizers/ssn.py | 3136d651f6d76f4be04410605d7dcf7a2d0a34a4 | [
"Apache-2.0"
] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 5,160 | py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from .. import builder
from ..builder import LOCALIZERS
from .base import BaseTAGClassifier
@LOCALIZERS.register_module()
class SSN(BaseTAGClassifier):
    """Temporal Action Detection with Structured Segment Networks.

    Args:
        backbone (dict): Config for building backbone.
        cls_head (dict): Config for building classification head.
        in_channels (int): Number of channels for input data.
            Default: 3.
        spatial_type (str): Type of spatial pooling.
            Default: 'avg'.
        dropout_ratio (float): Ratio of dropout.
            Default: 0.5.
        loss_cls (dict): Config for building loss.
            Default: ``dict(type='SSNLoss')``.
        train_cfg (dict | None): Config for training. Default: None.
        test_cfg (dict | None): Config for testing. Default: None.
    """
    def __init__(self,
                 backbone,
                 cls_head,
                 in_channels=3,
                 spatial_type='avg',
                 dropout_ratio=0.5,
                 loss_cls=dict(type='SSNLoss'),
                 train_cfg=None,
                 test_cfg=None):
        super().__init__(backbone, cls_head, train_cfg, test_cfg)
        # Flipped to True once forward_test has lazily built the
        # test-time FC layers via cls_head.prepare_test_fc().
        self.is_test_prepared = False
        self.in_channels = in_channels
        self.spatial_type = spatial_type
        if self.spatial_type == 'avg':
            self.pool = nn.AvgPool2d((7, 7), stride=1, padding=0)
        elif self.spatial_type == 'max':
            self.pool = nn.MaxPool2d((7, 7), stride=1, padding=0)
        else:
            # Any other spatial_type disables spatial pooling.
            self.pool = None
        self.dropout_ratio = dropout_ratio
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        self.loss_cls = builder.build_loss(loss_cls)
    def forward_train(self, imgs, proposal_scale_factor, proposal_type,
                      proposal_labels, reg_targets, **kwargs):
        """Define the computation performed at every call when training."""
        # Fold the leading sampling dims into the batch axis, keeping the
        # dims from axis 4 onwards (presumably spatial H, W -- confirm).
        imgs = imgs.reshape((-1, self.in_channels) + imgs.shape[4:])
        x = self.extract_feat(imgs)
        if self.pool:
            x = self.pool(x)
        if self.dropout is not None:
            x = self.dropout(x)
        activity_scores, completeness_scores, bbox_preds = self.cls_head(
            (x, proposal_scale_factor))
        loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
                             proposal_type, proposal_labels, reg_targets,
                             self.train_cfg)
        loss_dict = dict(**loss)
        return loss_dict
    def forward_test(self, imgs, relative_proposal_list, scale_factor_list,
                     proposal_tick_list, reg_norm_consts, **kwargs):
        """Define the computation performed at every call when testing."""
        num_crops = imgs.shape[0]
        imgs = imgs.reshape((num_crops, -1, self.in_channels) + imgs.shape[3:])
        num_ticks = imgs.shape[1]
        output = []
        # Extract features over the ticks in minibatches to bound memory.
        minibatch_size = self.test_cfg.ssn.sampler.batch_size
        for idx in range(0, num_ticks, minibatch_size):
            chunk = imgs[:, idx:idx +
                         minibatch_size, :, :, :].view((-1, ) + imgs.shape[2:])
            x = self.extract_feat(chunk)
            if self.pool:
                x = self.pool(x)
            # Merge crop to save memory.
            x = x.reshape((num_crops, x.size(0) // num_crops, -1)).mean(dim=0)
            output.append(x)
        output = torch.cat(output, dim=0)
        # Drop what looks like a singleton batch dim added upstream --
        # confirm against the test data pipeline.
        relative_proposal_list = relative_proposal_list.squeeze(0)
        proposal_tick_list = proposal_tick_list.squeeze(0)
        scale_factor_list = scale_factor_list.squeeze(0)
        reg_norm_consts = reg_norm_consts.squeeze(0)
        # Lazily build the test-time FC layers, once per model instance.
        if not self.is_test_prepared:
            self.is_test_prepared = self.cls_head.prepare_test_fc(
                self.cls_head.consensus.num_multipliers)
        (output, activity_scores, completeness_scores,
         bbox_preds) = self.cls_head(
             (output, proposal_tick_list, scale_factor_list), test_mode=True)
        relative_proposal_list = relative_proposal_list.cpu().numpy()
        activity_scores = activity_scores.cpu().numpy()
        completeness_scores = completeness_scores.cpu().numpy()
        reg_norm_consts = reg_norm_consts.cpu().numpy()
        if bbox_preds is not None:
            # Affine de-normalisation of the two regression channels:
            # pred * reg_norm_consts[1, c] + reg_norm_consts[0, c]
            # (rows presumably hold offset and scale -- confirm).
            bbox_preds = bbox_preds.view(-1, self.cls_head.num_classes, 2)
            bbox_preds[:, :, 0] = (
                bbox_preds[:, :, 0] * reg_norm_consts[1, 0] +
                reg_norm_consts[0, 0])
            bbox_preds[:, :, 1] = (
                bbox_preds[:, :, 1] * reg_norm_consts[1, 1] +
                reg_norm_consts[0, 1])
            bbox_preds = bbox_preds.cpu().numpy()
        result = [
            dict(
                relative_proposal_list=relative_proposal_list,
                activity_scores=activity_scores,
                completeness_scores=completeness_scores,
                bbox_preds=bbox_preds)
        ]
        return result
| [
"[email protected]"
] | |
4f7d9e2c17b601aaa0a2a0c3417e9963182cc6cf | 9b1e97850f55d839c1c6f7d93187af90bf9120a5 | /0x0F-python-object_relational_mapping/model_state.py | ee5227fff95d5feb70eb50c3b7eeefad7a80192e | [] | no_license | PilarPinto/holbertonschool-higher_level_programming | 543271fb7f85a23745f54ac44e2fd1ef0ff452ce | 8be531a14a280235c2a9cee7f072d88cea8b9921 | refs/heads/master | 2020-09-29T00:19:01.460334 | 2020-05-15T01:58:51 | 2020-05-15T01:58:51 | 226,900,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #!/usr/bin/python3
'''Using sqlalchemy for State definition'''
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class State(Base):
    """ORM mapping for rows of the ``states`` table."""
    __tablename__ = 'states'
    # Integer primary key; unique and NOT NULL.
    id = Column(Integer, unique=True, nullable=False, primary_key=True)
    # State name, at most 128 characters; NOT NULL.
    name = Column(String(128), nullable=False)
| [
"[email protected]"
] | |
1a7e8fc0d4c54d2afcb5cebbc52d6fda2888386d | f5ffd566166948c4202eb1e66bef44cf55a70033 | /openapi_client/model/user_acl_no_id.py | 12e3eb161421688aa7f70cd8bf579034b5b627b9 | [] | no_license | skyportal/skyportal_client | ed025ac6d23589238a9c133d712d4f113bbcb1c9 | 15514e4dfb16313e442d06f69f8477b4f0757eaa | refs/heads/master | 2023-02-10T02:54:20.757570 | 2021-01-05T02:18:03 | 2021-01-05T02:18:03 | 326,860,562 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,227 | py | """
Fritz: SkyPortal API
SkyPortal provides an API to access most of its underlying functionality. To use it, you will need an API token. This can be generated via the web application from your profile page or, if you are an admin, you may use the system provisioned token stored inside of `.tokens.yaml`. ### Accessing the SkyPortal API Once you have a token, you may access SkyPortal programmatically as follows. #### Python ```python import requests token = 'ea70a5f0-b321-43c6-96a1-b2de225e0339' def api(method, endpoint, data=None): headers = {'Authorization': f'token {token}'} response = requests.request(method, endpoint, json=data, headers=headers) return response response = api('GET', 'http://localhost:5000/api/sysinfo') print(f'HTTP code: {response.status_code}, {response.reason}') if response.status_code in (200, 400): print(f'JSON response: {response.json()}') ``` #### Command line (curl) ```shell curl -s -H 'Authorization: token ea70a5f0-b321-43c6-96a1-b2de225e0339' http://localhost:5000/api/sysinfo ``` ### Response In the above examples, the SkyPortal server is located at `http://localhost:5000`. In case of success, the HTTP response is 200: ``` HTTP code: 200, OK JSON response: {'status': 'success', 'data': {}, 'version': '0.9.dev0+git20200819.84c453a'} ``` On failure, it is 400; the JSON response has `status=\"error\"` with the reason for the failure given in `message`: ```js { \"status\": \"error\", \"message\": \"Invalid API endpoint\", \"data\": {}, \"version\": \"0.9.1\" } ``` # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 0.9.dev0+git20201221.76627dd
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
# Auto-generated OpenAPI model: associates a user id with an ACL id, with no
# id field of its own.  Regenerate from the spec rather than editing by hand.
class UserACLNoID(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {
    }
    # No length/range/regex constraints on this model.
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'user_id': (int,), # noqa: E501
            'acl_id': (str,), # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema does not take part in polymorphism.
        return None
    # Python attribute name -> JSON key (identical here).
    attribute_map = {
        'user_id': 'user_id', # noqa: E501
        'acl_id': 'acl_id', # noqa: E501
    }
    # Not composed from allOf/oneOf/anyOf schemas.
    _composed_schemas = {}
    # Instance attributes that are bookkeeping, not schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, user_id, acl_id, *args, **kwargs): # noqa: E501
        """UserACLNoID - a model defined in OpenAPI
        Args:
            user_id (int):
            acl_id (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Pop framework-control kwargs before treating the rest as properties.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Declared properties are set via setattr-style validation in the base.
        self.user_id = user_id
        self.acl_id = acl_id
        for var_name, var_value in kwargs.items():
            # Silently drop unknown keys only when the configuration asks for
            # it and the model declares no additional properties.
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
| [
"[email protected]"
] | |
237f5c9434aa81b5dc82ca5b556e349347c56299 | 3ae73fa03a2e99bb108a923606c293674b3db304 | /Django/beltreview bck_up/apps/login_reg/migrations/0001_initial.py | aeabc3aa8caef9da17f0c79b92cb6f91ada3f336 | [] | no_license | asdfkerub/DojoAssignments | 51bef584783d799469db85ff66983bac4f404e7f | 1eb0b5fa8ac881ce6d0b6765b104f806bdb71f5c | refs/heads/master | 2021-01-11T16:47:51.207662 | 2017-03-06T01:11:28 | 2017-03-06T01:11:28 | 79,671,651 | 0 | 0 | null | 2017-03-06T01:11:29 | 2017-01-21T20:43:04 | Python | UTF-8 | Python | false | false | 875 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 20:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=45)),
('alias', models.CharField(max_length=45)),
('email', models.CharField(max_length=255)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
54e239592280383f005aee0b8c80612e8cc24ee2 | e05e2d26e38ce80530e3458ce3c8e02f16e5cbe6 | /CoinAnalysis/vs_non.py | 9d16ac66cce5ad8d5800e2c994c9734be85bd377 | [] | no_license | jegutman/hearthstone_decks | 96acca7e040cb9b89253a867217655ce8cdf2756 | 95d4563c46618a9efccc10dbb34094258ec5bce7 | refs/heads/master | 2020-12-25T08:16:29.892068 | 2019-09-12T05:28:16 | 2019-09-12T05:28:16 | 102,289,609 | 3 | 0 | null | 2018-07-06T22:35:17 | 2017-09-03T19:40:44 | Python | UTF-8 | Python | false | false | 2,182 | py | from archetypes import aggro
# Aggregate Hearthstone matchup win rates for aggro decks against non-aggro
# opponents, split by whether the deck went first (1) or second (0), then
# print per-deck first/second win percentages and their difference.
archetypes = []
data = {}
line_data = []
decks = []
with open('CoinData.csv') as f:
    for line in f:
        if line[0] == "#":
            continue
        # CSV columns: deck_a, deck_b, first-flag, win %, number of games.
        tmp = line.strip().split(',')
        deck_a, deck_b, first, pct, games = tmp
        # Keep only rows where deck_a is aggro and deck_b is not.
        if deck_a not in aggro or deck_b in aggro: continue
        if deck_a not in decks:
            decks.append(deck_a)
            print(deck_a)
        for d in (deck_a, deck_b):
            if d not in archetypes:
                # NOTE(review): '10' apparently marks a malformed row in the
                # source data -- the assert surfaces the offending line.
                assert d != '10', line
                archetypes.append(d)
            if d not in data:
                data[d] = {}
        if deck_b not in data[deck_a]:
            data[deck_a][deck_b] = [(), ()]
        first = int(first)
        has_coin = 1 - first
        pct = float(pct)
        games = int(games)
        # Stored at index `first`: slot 1 = went first, slot 0 = went second.
        data[deck_a][deck_b][first] = (pct, games)
        line_data.append((deck_a, deck_b, first, pct, games))
diffs = {}
deck_stats = {}
games_count = {}
# Reconstruct win counts from win % * games, bucketed by (deck, went-first).
for deck_a, deck_b, first, pct, games in line_data:
    key = (deck_a, first)
    deck_stats[key] = deck_stats.get(key, 0) + int(round(pct * games / 100))
    games_count[key] = games_count.get(key, 0) + games
overall = []
for i in decks:
    # Overall win % when going first (pct_1) versus second (pct_0).
    pct_1 = round(float(deck_stats[(i, 1)] / games_count[(i,1)]) * 100, 1)
    pct_0 = round(float(deck_stats[(i, 0)] / games_count[(i,0)]) * 100, 1)
    #min_g = min(games_count[(i,1)], games_count[(i,0)])
    g_1 = games_count[(i,1)]
    g_0 = games_count[(i,0)]
    diff = round(pct_1 - pct_0, 1)
    #print("%-25s" % i, pct_1, pct_0, "%5.1f" % diff, "%6s" % min_g)
    #overall.append((i, pct_1, pct_0, diff, min_g))
    overall.append((i, pct_1, pct_0, diff, g_1, g_0))
#for i, pct_1, pct_0, diff, min_g in sorted(overall, key=lambda x:x[-2], reverse=True):
#    print("%-25s" % i, pct_1, pct_0, "%5.1f" % diff, "%6s" % min_g)
# Header row, then decks sorted by the first-vs-second win-rate difference.
i, pct_1, pct_0, diff, g_1, g_0 = "deck,1st ,2nd ,diff,g_1,g_2".split(',')
print("%-25s" % i, pct_1, pct_0, "%5s" % diff, "%6s" % g_1, "%6s" % g_0)
for i, pct_1, pct_0, diff, g_1, g_0 in sorted(overall, key=lambda x:x[3], reverse=True):
    print("%-25s" % i.replace(' ', '_'), pct_1, pct_0, "%5.1f" % diff, "%6s" % g_1, "%6s" % g_0)
| [
"[email protected]"
] | |
554fb914629e5e2cba22ade77f00e4a6143b04ab | bdce502dce36a5f53ed7e376c5783c8bcbe6a98e | /migrations/versions/55bd2e159b91_added_type_to_coach.py | 0b8c796002c87ae489801a7c293d32952ecaf4fb | [
"MIT"
] | permissive | jeffthemaximum/jeffPD | b05b02300653b34c235adb2de46c91e18604bcf4 | 4ac2584117c45c70b77bebe64676b0138577e14f | refs/heads/master | 2021-01-01T19:38:30.041034 | 2015-11-17T14:35:57 | 2015-11-17T14:35:57 | 41,260,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | """added type to coach
Revision ID: 55bd2e159b91
Revises: 1abfb1cdc0ea
Create Date: 2015-10-10 11:52:37.381983
"""
# revision identifiers, used by Alembic.
revision = '55bd2e159b91'
down_revision = '1abfb1cdc0ea'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: add coaches.coach_type and index logs.timestamp.
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('coaches', sa.Column('coach_type', sa.String(length=64), nullable=True))
    op.create_index('ix_logs_timestamp', 'logs', ['timestamp'], unique=False)
    ### end Alembic commands ###
def downgrade():
    # Reverse migration: undo upgrade() (drop the index, then the column).
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index('ix_logs_timestamp', 'logs')
    op.drop_column('coaches', 'coach_type')
    ### end Alembic commands ###
| [
"[email protected]"
] | |
8ec2fb53dc07004d77a1ec97066dcbe63aa762c7 | 165e706d485e90f4e4f63cfb9f2c35acda14cfc0 | /safemrl/algorithm/agents.py | c13f34a7a21b223bf7f2ffd034e82299bd8873fd | [
"Apache-2.0"
] | permissive | Tarkiyah/googleResearch | 65581f3bbbe2ffe248c9e613c0ea7eac336d5372 | dea327aa9e7ef7f7bca5a6c225dbdca1077a06e9 | refs/heads/master | 2022-12-07T12:04:44.153221 | 2019-11-21T16:03:48 | 2019-11-21T16:18:28 | 223,229,888 | 11 | 2 | Apache-2.0 | 2022-11-21T21:39:10 | 2019-11-21T17:38:31 | Jupyter Notebook | UTF-8 | Python | false | false | 9,870 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF-Agents policies, networks, and helpers.
Custom TF-Agents policies, networks, and helpers for Safe SAC.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import gin
import numpy as np
import tensorflow as tf
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.policies import actor_policy
from tf_agents.policies import boltzmann_policy
from tf_agents.spec import tensor_spec
from tf_agents.utils import nest_utils
def process_replay_buffer(replay_buffer, max_ep_len=500, k=1, as_tensor=False):
  """Process replay buffer to infer safety rewards with episode boundaries.

  An episode shorter than max_ep_len is treated as a failure; the last k
  steps of each failed episode are labeled 1, everything else 0.  The flat
  label array is right-padded with zeros to match the reward tensor width.
  Returns a float32 numpy array, or a tensor when as_tensor is True.
  """
  rb_data = replay_buffer.gather_all()
  rew = rb_data.reward
  # Column indices at which episodes end (axis 1 is the time axis here).
  boundary_idx = np.where(rb_data.is_boundary().numpy())[1]
  last_idx = 0
  k_labels = []
  for term_idx in boundary_idx:
    # TODO(krshna): remove +1?
    # Episode failed if it terminated before reaching the max length.
    fail = 1 - int(term_idx - last_idx >= max_ep_len + 1)
    ep_rew = tf.gather(rew, np.arange(last_idx, term_idx), axis=1)
    labels = np.zeros(ep_rew.shape_as_list()) # ignore obs dim
    # Only the final k steps of a failed episode carry the failure label.
    labels[:, Ellipsis, -k:] = fail
    k_labels.append(labels)
    last_idx = term_idx
  flat_labels = np.concatenate(k_labels, axis=-1).astype(np.float32)
  n_flat_labels = flat_labels.shape[1]
  n_rews = rb_data.reward.shape_as_list()[1]
  # Pad with zeros so labels align 1:1 with the stored rewards.
  safe_rew_labels = np.pad(
      flat_labels, ((0, 0), (0, n_rews - n_flat_labels)), mode='constant')
  if as_tensor:
    return tf.to_float(safe_rew_labels)
  return safe_rew_labels
# Pre-processor layers to remove observation from observation dict returned by
# goal-conditioned point-mass environment.
@gin.configurable
def extract_obs_merge_w_ac_layer():
  """Builds a Keras layer combining an observation dict with an action.

  The returned layer takes (observation_dict, action) and concatenates the
  dict's 'observation' entry with the action tensor along axis 1, dropping
  any other keys (e.g. the goal in goal-conditioned environments).
  """
  def _merge(inputs):
    obs_dict, action = inputs
    return tf.keras.layers.concatenate([obs_dict['observation'], action],
                                       axis=1)
  return tf.keras.layers.Lambda(_merge)
@gin.configurable
def extract_observation_layer():
  """Builds a Keras layer that selects the 'observation' entry of a dict."""
  def _select_observation(obs):
    return obs['observation']
  return tf.keras.layers.Lambda(_select_observation)
# CriticNetwork constructed with EncodingNetwork
@gin.configurable
class CriticNetwork(network.Network):
  """Critic Network: encodes (observation, action) and emits a scalar Q."""
  def __init__(
      self,
      input_tensor_spec,
      preprocessing_combiner=None,
      joint_fc_layer_params=None,
      joint_dropout_layer_params=None,
      kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
          scale=1. / 3., mode='fan_in', distribution='uniform'),
      activation_fn=tf.nn.relu,
      name='CriticNetwork'):
    """Creates an instance of `CriticNetwork`.
    Args:
      input_tensor_spec: A tuple of (observation, action) each a nest of
        `tensor_spec.TensorSpec` representing the inputs.
      preprocessing_combiner: Combiner layer for obs and action inputs
      joint_fc_layer_params: Optional list of fully connected parameters after
        merging observations and actions, where each item is the number of units
        in the layer.
      joint_dropout_layer_params: Optional list of dropout layer parameters,
        each item is the fraction of input units to drop or a dictionary of
        parameters according to the keras.Dropout documentation. The additional
        parameter `permanent', if set to True, allows to apply dropout at
        inference for approximated Bayesian inference. The dropout layers are
        interleaved with the fully connected layers; there is a dropout layer
        after each fully connected layer, except if the entry in the list is
        None. This list must have the same length of joint_fc_layer_params, or
        be None.
      kernel_initializer: Initializer to use for the kernels of the conv and
        dense layers. If none is provided a default glorot_uniform
      activation_fn: Activation function, e.g. tf.nn.relu, slim.leaky_relu, ...
      name: A string representing name of the network.
    Raises:
      ValueError: If `observation_spec` or `action_spec` contains more than one
        observation.
    """
    observation_spec, action_spec = input_tensor_spec
    # Multiple observations are only supported through a combiner layer.
    if (len(tf.nest.flatten(observation_spec)) > 1 and
        preprocessing_combiner is None):
      raise ValueError('Only a single observation is supported by this network')
    flat_action_spec = tf.nest.flatten(action_spec)
    if len(flat_action_spec) > 1:
      raise ValueError('Only a single action is supported by this network')
    self._single_action_spec = flat_action_spec[0]
    preprocessing_layers = None
    # combiner assumes a single batch dimension, without time
    super(CriticNetwork, self).__init__(
        input_tensor_spec=input_tensor_spec, state_spec=(), name=name)
    # Shared encoder over the combined (observation, action) input.
    self._encoder = encoding_network.EncodingNetwork(
        input_tensor_spec,
        preprocessing_layers=preprocessing_layers,
        preprocessing_combiner=preprocessing_combiner,
        fc_layer_params=joint_fc_layer_params,
        dropout_layer_params=joint_dropout_layer_params,
        activation_fn=activation_fn,
        kernel_initializer=kernel_initializer,
        batch_squash=False)
    # Final linear head producing one Q-value; small init keeps early
    # estimates near zero.
    self._value_layer = tf.keras.layers.Dense(
        1,
        activation=None,
        kernel_initializer=tf.keras.initializers.RandomUniform(
            minval=-0.003, maxval=0.003),
        name='value')
  def call(self, observations, step_type, network_state=()):
    # Encode, then flatten the (batch, 1) value output to shape (batch,).
    state, network_state = self._encoder(
        observations, step_type=step_type, network_state=network_state)
    q_val = self._value_layer(state)
    return tf.reshape(q_val, [-1]), network_state
@gin.configurable
class SafeActorPolicyRSVar(actor_policy.ActorPolicy):
  """Returns safe actions by rejection sampling with increasing variance.

  Candidate actions are sampled from the actor distribution and scored by a
  safety critic; candidates whose predicted failure probability is below
  `safety_threshold` are considered safe.  When no safe candidate exists the
  sampling variance is grown and sampling is retried (up to 4 times).
  """
  def __init__(self,
               time_step_spec,
               action_spec,
               actor_network,
               safety_critic_network=None,
               safety_threshold=0.1,
               info_spec=(),
               observation_normalizer=None,
               clip=True,
               resample_metric=None,
               name=None):
    super(SafeActorPolicyRSVar,
          self).__init__(time_step_spec, action_spec, actor_network, info_spec,
                         observation_normalizer, clip, name)
    self._safety_critic_network = safety_critic_network
    self._safety_threshold = safety_threshold
    # Optional callable invoked once per resampling round (for metrics).
    self._resample_metric = resample_metric
  def _apply_actor_network(self, time_step, policy_state):
    # Rejection sampling is only applied to single (unbatched) time steps.
    has_batch_dim = time_step.step_type.shape.as_list()[0] > 1
    observation = time_step.observation
    if self._observation_normalizer:
      observation = self._observation_normalizer.normalize(observation)
    actions, policy_state = self._actor_network(observation,
                                                time_step.step_type,
                                                policy_state)
    if has_batch_dim:
      return actions, policy_state
    # samples "best" safe action out of 50
    sampled_ac = actions.sample(50)
    # Tile the single observation so the critic scores all 50 candidates.
    obs = nest_utils.stack_nested_tensors(
        [time_step.observation for _ in range(50)])
    q_val, _ = self._safety_critic_network((obs, sampled_ac),
                                           time_step.step_type)
    fail_prob = tf.nn.sigmoid(q_val)
    safe_ac_mask = fail_prob < self._safety_threshold
    safe_ac_idx = tf.where(safe_ac_mask)
    resample_count = 0
    # No safe candidate: widen the distribution and resample, at most 4 times.
    while resample_count < 4 and not safe_ac_idx.shape.as_list()[0]:
      if self._resample_metric is not None:
        self._resample_metric()
      resample_count += 1
      scale = actions.scale * 1.5 # increase variance by constant 1.5
      actions = self._actor_network.output_spec.build_distribution(
          loc=actions.loc, scale=scale)
      sampled_ac = actions.sample(50)
      q_val, _ = self._safety_critic_network((obs, sampled_ac),
                                             time_step.step_type)
      fail_prob = tf.nn.sigmoid(q_val)
      # NOTE(review): this pass squeezes fail_prob before thresholding while
      # the first pass does not -- confirm both produce equivalent masks.
      safe_ac_idx = tf.where(tf.squeeze(fail_prob) < self._safety_threshold)
    if not safe_ac_idx.shape.as_list()[0]: # return safest action
      safe_ac_idx = tf.math.argmin(fail_prob)
      return sampled_ac[safe_ac_idx], policy_state
    actions = tf.squeeze(tf.gather(sampled_ac, safe_ac_idx))
    fail_prob_safe = tf.gather(fail_prob, safe_ac_idx)
    # Among the safe candidates, pick the one with the highest (still
    # sub-threshold) failure probability -- presumably to stay closest to
    # the unconstrained policy; confirm this is the intended criterion.
    safe_idx = tf.math.argmax(fail_prob_safe)
    return actions[safe_idx], policy_state
# Policy-info structure carrying the sampling temperature alongside actions.
BoltzmannPolicyInfo = collections.namedtuple('BoltzmannPolicyInfo',
                                             ('temperature',))
@gin.configurable
class SafetyBoltzmannPolicy(boltzmann_policy.BoltzmannPolicy):
  """A Boltzmann policy that also reports its temperature in the policy info."""
  def __init__(self, policy, temperature=1.0, name=None):
    super(SafetyBoltzmannPolicy, self).__init__(policy, temperature, name)
    # Extend the info spec so each step records the temperature used.
    info_spec = BoltzmannPolicyInfo(
        temperature=tensor_spec.TensorSpec((), tf.float32, name='temperature'))
    self._info_spec = info_spec
    self._setup_specs() # run again to make sure specs are correctly updated
  def _distribution(self, time_step, policy_state):
    distribution_step = super(SafetyBoltzmannPolicy,
                              self)._distribution(time_step, policy_state)
    # Attach the current temperature to the emitted policy step.
    distribution_step = distribution_step._replace(
        info=BoltzmannPolicyInfo(temperature=self._temperature))
    return distribution_step
| [
"[email protected]"
] | |
979ad3afa724f60333fec8a0444de42bd250d08f | 9a46784244d544445c01c6f0d564f4da65efcfaf | /CodeUltimateFlaskCourse/06. Member API/authentication/app.py | a94fe21a10ee8b8a8bc2c9372c5c21fd4617e606 | [] | no_license | ammbyrne/Flask | f55a606ec234c6a00b4d264a48e11b2f487d4ef7 | 7922ab46b8a4c388346043d2393173e7e49e43bb | refs/heads/main | 2023-04-19T16:07:08.224824 | 2021-05-07T03:21:44 | 2021-05-07T03:21:44 | 365,101,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,120 | py | from flask import Flask, g, request, jsonify
from database import get_db
app = Flask(__name__)
# NOTE(review): hard-coded HTTP Basic credentials; move to configuration or
# environment variables before any real deployment.
api_username = 'admin'
api_password = 'password'
@app.teardown_appcontext
def close_db(error):
    # Close the per-request SQLite connection stored on flask.g, if one was
    # opened by get_db() during this request.
    if hasattr(g, 'sqlite_db'):
        g.sqlite_db.close()
@app.route('/member', methods=['GET'])
def get_members():
    """Return all members as JSON; protected by HTTP Basic auth.

    Responds 403 when credentials are absent or wrong.
    """
    # Check credentials before doing any database work.  The original code
    # queried the DB first and, when no Authorization header was sent,
    # crashed with AttributeError (request.authorization is None), turning
    # an auth failure into an HTTP 500.
    auth = request.authorization
    if auth is None or auth.username != api_username or auth.password != api_password:
        return jsonify({'message' : 'Authentication failed!'}), 403
    db = get_db()
    members_cur = db.execute('select id, name, email, level from members')
    members = members_cur.fetchall()
    return_values = []
    for member in members:
        member_dict = {}
        member_dict['id'] = member['id']
        member_dict['name'] = member['name']
        member_dict['email'] = member['email']
        member_dict['level'] = member['level']
        return_values.append(member_dict)
    # NOTE(review): echoing the password back in the response is a security
    # smell, kept only for backward compatibility with existing clients.
    return jsonify({'members' : return_values, 'username' : auth.username, 'password' : auth.password})
@app.route('/member/<int:member_id>', methods=['GET'])
def get_member(member_id):
    # Fetch a single member by primary key.
    # NOTE(review): unlike get_members this endpoint requires no auth, and an
    # unknown id makes fetchone() return None -> TypeError (HTTP 500);
    # consider returning 404 instead.
    db = get_db()
    member_cur = db.execute('select id, name, email, level from members where id = ?', [member_id])
    member = member_cur.fetchone()
    return jsonify({'member' : {'id' : member['id'], 'name' : member['name'], 'email' : member['email'], 'level' : member['level']}})
@app.route('/member', methods=['POST'])
def add_member():
    """Create a member from a JSON body {name, email, level}.

    Returns the newly inserted row as JSON.
    """
    new_member_data = request.get_json()
    name = new_member_data['name']
    email = new_member_data['email']
    level = new_member_data['level']
    db = get_db()
    # Fetch the inserted row via the cursor's lastrowid.  The original
    # re-queried by name, which returns an arbitrary matching row when
    # several members share the same name.
    insert_cur = db.execute('insert into members (name, email, level) values (?, ?, ?)', [name, email, level])
    db.commit()
    member_cur = db.execute('select id, name, email, level from members where id = ?', [insert_cur.lastrowid])
    new_member = member_cur.fetchone()
    return jsonify({'member' : {'id' : new_member['id'], 'name' : new_member['name'], 'email' : new_member['email'], 'level' : new_member['level']}})
@app.route('/member/<int:member_id>', methods=['PUT', 'PATCH'])
def edit_member(member_id):
    # Replace name/email/level of an existing member and return the row.
    # NOTE(review): all three fields are required in the JSON body even for
    # PATCH; a missing key raises KeyError (HTTP 500).
    new_member_data = request.get_json()
    name = new_member_data['name']
    email = new_member_data['email']
    level = new_member_data['level']
    db = get_db()
    db.execute('update members set name = ?, email = ?, level = ? where id = ?', [name, email, level, member_id])
    db.commit()
    # Re-read the row so the response reflects what is actually stored.
    member_cur = db.execute('select id, name, email, level from members where id = ?', [member_id])
    member = member_cur.fetchone()
    return jsonify({'member' : {'id' : member['id'], 'name' : member['name'], 'email' : member['email'], 'level' : member['level']}})
@app.route('/member/<int:member_id>', methods=['DELETE'])
def delete_member(member_id):
    # Delete by primary key; deleting a nonexistent id is a silent no-op.
    db = get_db()
    db.execute('delete from members where id = ?', [member_id])
    db.commit()
    return jsonify({'message' : 'The member has been deleted!'})
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
"[email protected]"
] | |
d21741515b51c9b3f25b2293bec7070258246c98 | a829617f9ad158df80a569dd02a99c53639fa2c6 | /test/hep/table/exception1.py | 826950668cd50b0628b9344a6d73d01ea3f9fb31 | [] | no_license | alexhsamuel/pyhep | 6db5edd03522553c54c8745a0e7fe98d96d2b7ae | c685756e9065a230e2e84c311a1c89239c5d94de | refs/heads/master | 2021-01-10T14:24:08.648081 | 2015-10-22T13:18:50 | 2015-10-22T13:18:50 | 44,745,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | #-----------------------------------------------------------------------
# imports
#-----------------------------------------------------------------------
from __future__ import division
import hep.table
from hep.test import compare, assert_
#-----------------------------------------------------------------------
# test
#-----------------------------------------------------------------------
# Regression test: project an expression over a table while tolerating
# per-row evaluation exceptions (here, division by zero).
schema = hep.table.Schema()
schema.addColumn("x", "float32")
schema.addColumn("y", "float32")
table = hep.table.create("exception1.table", schema)
table.append(x=5, y=2)
table.append(x=3, y=4)
table.append(x=0, y=4)
table.append(x=3, y=0)
table.append(x=4, y=3)
# NOTE(review): 'sum' shadows the builtin; harmless in this standalone test.
sum = 0
def callback(value, weight):
    # Accumulate each successfully evaluated x/y value; weight should be 1.
    global sum
    assert_(weight == 1)
    sum += value
hep.table.project(table, [("x / y", callback)],
                  handle_expr_exceptions=True)
# The (x=3, y=0) row divides by zero and is skipped, so the expected total
# has only the four remaining terms (0/4 contributes the 0).
compare(sum, 5 / 2 + 3 / 4 + 0 + 4 / 3)
| [
"[email protected]"
] | |
5b22188159510783109706d9d6aee73b30184cd5 | 7ac1f3e38dab2899d6dc0d02cc1ace3934fb0805 | /pygame/tank_game/code.py | 250360ff8a4e559005ce17747cd5bdc67f609b6e | [] | no_license | amanbhal/pythonCodes | 3fd9357211fe7d06c6972e7a4f469df1ff3cf60a | 49d17ce395d15e7c8497af8455790ecb876a0d49 | refs/heads/master | 2016-08-12T06:12:19.108863 | 2015-11-16T20:42:11 | 2015-11-16T20:42:11 | 46,301,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | num = int(raw_input())
# (Python 2) Flip every bit of num's binary representation -- the one's
# complement restricted to the number's own bit length -- and print both the
# intermediate flipped digits and the resulting integer.
bnum = bin(num)
convert = []
for x in bnum[2:]:
    # Skip the '0b' prefix and invert each digit character.
    if x=='0':
        convert.append('1')
    else:
        convert.append('0')
print convert
convert = "".join(convert)
result = int(convert,2)
print result
"[email protected]"
] | |
b8830ee9a2275eae167cf660353d0f991769fe44 | 19f1612a24a343198302fe1b88d15a2d94a5d91f | /Mod_Divmod.py | 67fedd76cd0160039410121cd5b1209d8232ae5e | [] | no_license | TheShubham-K/HackerRank | 0a8f15051e5466292d880ba3d334bc19733c4ab7 | a51bcfa4dee85258787cc5bc96976045b05a963f | refs/heads/master | 2022-11-09T16:24:35.595762 | 2020-06-29T15:56:39 | 2020-06-29T15:56:39 | 266,571,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | from __future__ import division
# Read two integers and print the integer quotient, the remainder, and the
# (quotient, remainder) tuple on three lines (HackerRank "Mod Divmod").
n = int(input())
m = int(input())
ans = divmod(n,m)
print(str(ans[0])+"\n"+str(ans[1])+"\n"+str(ans))
| [
"[email protected]"
] | |
516a9d17099ff146518573d7cc216f94fe6fd594 | 7fa02f8480152e2656b083ef99a17741533f254c | /pyscf/ao2mo/incore.py | c8d03c4811e9e0d1db54621c40df8797c0eecceb | [
"Apache-2.0"
] | permissive | kcbhamu/pyscf | b59c66974f36578c35150afd1967d23b78dad35a | adbdcbdd426356485d9a249d7461f668d19d6f9e | refs/heads/master | 2022-10-14T07:38:56.826857 | 2020-06-12T17:13:45 | 2020-06-12T17:13:45 | 272,482,387 | 1 | 0 | Apache-2.0 | 2020-06-15T15:57:28 | 2020-06-15T15:57:27 | null | UTF-8 | Python | false | false | 9,496 | py | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import sys
import numpy
import ctypes
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
BLOCK = 56
def full(eri_ao, mo_coeff, verbose=0, compact=True, **kwargs):
    r'''MO integral transformation for the given orbital.
    Args:
        eri_ao : ndarray
            AO integrals, can be either 8-fold or 4-fold symmetry.
        mo_coeff : ndarray
            Transform (ij|kl) with the same set of orbitals.
    Kwargs:
        verbose : int
            Print level
        compact : bool
            When compact is True, the returned MO integrals have 4-fold
            symmetry.  Otherwise, return the "plain" MO integrals.
    Returns:
        2D array of transformed MO integrals.  The MO integrals may or may not
        have the permutation symmetry (controlled by the kwargs compact)
    Examples:
    >>> from pyscf import gto
    >>> from pyscf.scf import _vhf
    >>> from pyscf import ao2mo
    >>> mol = gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto3g')
    >>> eri = mol.intor('int2e_sph', aosym='s8')
    >>> mo1 = numpy.random.random((mol.nao_nr(), 10))
    >>> eri1 = ao2mo.incore.full(eri, mo1)
    >>> print(eri1.shape)
    (55, 55)
    >>> eri1 = ao2mo.incore.full(eri, mo1, compact=False)
    >>> print(eri1.shape)
    (100, 100)
    '''
    # Special case of general(): all four indices use the same orbital set.
    return general(eri_ao, (mo_coeff,)*4, verbose, compact)
# It consumes two times of the memory needed by MO integrals
# It consumes two times of the memory needed by MO integrals
def general(eri_ao, mo_coeffs, verbose=0, compact=True, **kwargs):
    r'''For the given four sets of orbitals, transfer the 8-fold or 4-fold 2e
    AO integrals to MO integrals.
    Args:
        eri_ao : ndarray
            AO integrals, can be either 8-fold or 4-fold symmetry.
        mo_coeffs : 4-item list of ndarray
            Four sets of orbital coefficients, corresponding to the four
            indices of (ij|kl)
    Kwargs:
        verbose : int
            Print level
        compact : bool
            When compact is True, depending on the four oribital sets, the
            returned MO integrals has (up to 4-fold) permutation symmetry.
            If it's False, the function will abandon any permutation symmetry,
            and return the "plain" MO integrals
    Returns:
        2D array of transformed MO integrals.  The MO integrals may or may not
        have the permutation symmetry, depending on the given orbitals, and
        the kwargs compact.  If the four sets of orbitals are identical, the
        MO integrals will at most have 4-fold symmetry.
    Examples:
    >>> from pyscf import gto
    >>> from pyscf.scf import _vhf
    >>> from pyscf import ao2mo
    >>> mol = gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto3g')
    >>> eri = mol.intor('int2e_sph', aosym='s8')
    >>> mo1 = numpy.random.random((mol.nao_nr(), 10))
    >>> mo2 = numpy.random.random((mol.nao_nr(), 8))
    >>> mo3 = numpy.random.random((mol.nao_nr(), 6))
    >>> mo4 = numpy.random.random((mol.nao_nr(), 4))
    >>> eri1 = ao2mo.incore.general(eri, (mo1,mo2,mo3,mo4))
    >>> print(eri1.shape)
    (80, 24)
    >>> eri1 = ao2mo.incore.general(eri, (mo1,mo2,mo3,mo3))
    >>> print(eri1.shape)
    (80, 21)
    >>> eri1 = ao2mo.incore.general(eri, (mo1,mo2,mo3,mo3), compact=False)
    >>> print(eri1.shape)
    (80, 36)
    >>> eri1 = ao2mo.incore.general(eri, (mo1,mo1,mo2,mo2))
    >>> print(eri1.shape)
    (55, 36)
    >>> eri1 = ao2mo.incore.general(eri, (mo1,mo2,mo1,mo2))
    >>> print(eri1.shape)
    (80, 80)
    '''
    nao = mo_coeffs[0].shape[0]
    # No permutation symmetry in eri_ao: do the full 4-index contraction.
    if eri_ao.size == nao**4:
        return lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri_ao.reshape([nao]*4),
                          mo_coeffs[0].conj(), mo_coeffs[1],
                          mo_coeffs[2].conj(), mo_coeffs[3])
    # transform e1: half-transform the (ij| pair first.
    eri1 = half_e1(eri_ao, mo_coeffs, compact)
    klmosym, nkl_pair, mokl, klshape = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
    if eri1.shape[0] == 0 or nkl_pair == 0:
        # 0 dimension causes error in certain BLAS implementations
        return numpy.zeros((eri1.shape[0],nkl_pair))
    # if nij_pair > nkl_pair:
    # log.warn('low efficiency for AO to MO trans!')
    # transform e2: C-level driver completes the |kl) half-transformation.
    eri1 = _ao2mo.nr_e2(eri1, mokl, klshape, aosym='s4', mosym=klmosym)
    return eri1
def half_e1(eri_ao, mo_coeffs, compact=True):
    r'''Given two set of orbitals, half transform the (ij| pair of 8-fold or
    4-fold AO integrals (ij|kl)
    Args:
        eri_ao : ndarray
            AO integrals, can be either 8-fold or 4-fold symmetry.
        mo_coeffs : list of ndarray
            Two sets of orbital coefficients, corresponding to the i, j
            indices of (ij|kl)
    Kwargs:
        compact : bool
            When compact is True, the returned MO integrals uses the highest
            possible permutation symmetry.  If it's False, the function will
            abandon any permutation symmetry, and return the "plain" MO
            integrals
    Returns:
        ndarray of transformed MO integrals.  The MO integrals may or may not
        have the permutation symmetry, depending on the given orbitals, and
        the kwargs compact.
    Examples:
    >>> from pyscf import gto
    >>> from pyscf import ao2mo
    >>> mol = gto.M(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto3g')
    >>> eri = mol.intor('int2e_sph', aosym='s8')
    >>> mo1 = numpy.random.random((mol.nao_nr(), 10))
    >>> mo2 = numpy.random.random((mol.nao_nr(), 8))
    >>> eri1 = ao2mo.incore.half_e1(eri, (mo1,mo2))
    >>> eri1 = ao2mo.incore.half_e1(eri, (mo1,mo2))
    >>> print(eri1.shape)
    (80, 28)
    >>> eri1 = ao2mo.incore.half_e1(eri, (mo1,mo2), compact=False)
    >>> print(eri1.shape)
    (80, 28)
    >>> eri1 = ao2mo.incore.half_e1(eri, (mo1,mo1))
    >>> print(eri1.shape)
    (55, 28)
    '''
    # The C driver requires contiguous row-major AO integrals.
    eri_ao = numpy.asarray(eri_ao, order='C')
    nao, nmoi = mo_coeffs[0].shape
    nmoj = mo_coeffs[1].shape[1]
    nao_pair = nao*(nao+1)//2
    ijmosym, nij_pair, moij, ijshape = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
    # Convert (start, stop) pairs to (start, count) as the C driver expects.
    ijshape = (ijshape[0], ijshape[1]-ijshape[0],
               ijshape[2], ijshape[3]-ijshape[2])
    eri1 = numpy.empty((nij_pair,nao_pair))
    if nij_pair == 0:
        return eri1
    # Select the C transpose/transform routine based on eri_ao's symmetry.
    if eri_ao.size == nao_pair**2: # 4-fold symmetry
        # half_e1 first transforms the indices which are contiguous in memory
        # transpose the 4-fold integrals to make ij the contiguous indices
        eri_ao = lib.transpose(eri_ao)
        ftrans = _ao2mo.libao2mo.AO2MOtranse1_incore_s4
    elif eri_ao.size == nao_pair*(nao_pair+1)//2:
        ftrans = _ao2mo.libao2mo.AO2MOtranse1_incore_s8
    else:
        raise NotImplementedError
    # Pick the matrix-multiply kernel matching the output pair symmetry.
    if ijmosym == 's2':
        fmmm = _ao2mo.libao2mo.AO2MOmmm_nr_s2_s2
    elif nmoi <= nmoj:
        fmmm = _ao2mo.libao2mo.AO2MOmmm_nr_s2_iltj
    else:
        fmmm = _ao2mo.libao2mo.AO2MOmmm_nr_s2_igtj
    fdrv = getattr(_ao2mo.libao2mo, 'AO2MOnr_e1incore_drv')
    # Process the AO-pair axis in BLOCK-sized chunks to bound the buffer size.
    buf = numpy.empty((BLOCK, nij_pair))
    for p0, p1 in lib.prange(0, nao_pair, BLOCK):
        fdrv(ftrans, fmmm,
             buf.ctypes.data_as(ctypes.c_void_p),
             eri_ao.ctypes.data_as(ctypes.c_void_p),
             moij.ctypes.data_as(ctypes.c_void_p),
             ctypes.c_int(p0), ctypes.c_int(p1-p0),
             ctypes.c_int(nao),
             ctypes.c_int(ijshape[0]), ctypes.c_int(ijshape[1]),
             ctypes.c_int(ijshape[2]), ctypes.c_int(ijshape[3]))
        eri1[:,p0:p1] = buf[:p1-p0].T
    return eri1
def iden_coeffs(mo1, mo2):
    '''Whether two MO coefficient matrices can be treated as identical.

    True when both names refer to the same array object, or when the arrays
    have equal shape and agree element-wise to within 1e-13.
    '''
    if mo1 is mo2:
        return True
    if mo1.shape != mo2.shape:
        return False
    return abs(mo1 - mo2).max() < 1e-13
def _conc_mos(moi, moj, compact=False):
if numpy.result_type(moi, moj) != numpy.double:
compact = False
nmoi = moi.shape[1]
nmoj = moj.shape[1]
if compact and iden_coeffs(moi, moj):
ijmosym = 's2'
nij_pair = nmoi * (nmoi+1) // 2
moij = numpy.asarray(moi, order='F')
ijshape = (0, nmoi, 0, nmoi)
else:
ijmosym = 's1'
nij_pair = nmoi * nmoj
moij = numpy.asarray(numpy.hstack((moi,moj)), order='F')
ijshape = (0, nmoi, nmoi, nmoi+nmoj)
return ijmosym, nij_pair, moij, ijshape
if __name__ == '__main__':
    # Smoke test: run an RHF calculation on water and check the transformed
    # MO integrals against a reference checksum (printed differences ~ 0).
    from pyscf import scf
    from pyscf import gto
    mol = gto.Mole()
    mol.verbose = 5
    mol.output = 'out_h2o'
    mol.atom = [
        ["O" , (0. , 0. , 0.)],
        [1 , (0. , -0.757 , 0.587)],
        [1 , (0. , 0.757 , 0.587)]]
    mol.basis = {'H': 'cc-pvtz',
                 'O': 'cc-pvtz',}
    mol.build()
    rhf = scf.RHF(mol)
    rhf.scf()
    import time
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended replacement for interval timing.
    print(time.perf_counter())
    eri0 = full(rhf._eri, rhf.mo_coeff)
    print(abs(eri0).sum()-5384.460843787659) # should = 0
    eri0 = general(rhf._eri, (rhf.mo_coeff,)*4)
    print(abs(eri0).sum()-5384.460843787659)
    print(time.perf_counter())
| [
"[email protected]"
] | |
1e3807db28c7349317eeba39285686bc12b95757 | 50948d4cb10dcb1cc9bc0355918478fb2841322a | /azure-mgmt-storage/azure/mgmt/storage/v2019_04_01/models/immutability_policy_properties_py3.py | feb15f507cfde35317d9e13017493cd6dc5c0c5a | [
"MIT"
] | permissive | xiafu-msft/azure-sdk-for-python | de9cd680b39962702b629a8e94726bb4ab261594 | 4d9560cfd519ee60667f3cc2f5295a58c18625db | refs/heads/master | 2023-08-12T20:36:24.284497 | 2019-05-22T00:55:16 | 2019-05-22T00:55:16 | 187,986,993 | 1 | 0 | MIT | 2020-10-02T01:17:02 | 2019-05-22T07:33:46 | Python | UTF-8 | Python | false | false | 2,478 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ImmutabilityPolicyProperties(Model):
    """Describes an ImmutabilityPolicy applied to a blob container.

    Only ``immutability_period_since_creation_in_days`` is settable by the
    caller (and it is required when sending to Azure); ``state``, ``etag``
    and ``update_history`` are populated by the server and ignored when
    sending a request.

    :param immutability_period_since_creation_in_days: Required. Number of
     days, counted from policy creation, during which blobs in the
     container are immutable.
    :type immutability_period_since_creation_in_days: int
    :ivar state: The ImmutabilityPolicy state of a blob container. Possible
     values include: 'Locked', 'Unlocked'
    :vartype state: str or
     ~azure.mgmt.storage.v2019_04_01.models.ImmutabilityPolicyState
    :ivar etag: ImmutabilityPolicy Etag.
    :vartype etag: str
    :ivar update_history: The ImmutabilityPolicy update history of the blob
     container.
    :vartype update_history:
     list[~azure.mgmt.storage.v2019_04_01.models.UpdateHistoryProperty]
    """
    # msrest metadata: required and server-populated (read-only) markers.
    _validation = {
        'immutability_period_since_creation_in_days': {'required': True},
        'state': {'readonly': True},
        'etag': {'readonly': True},
        'update_history': {'readonly': True},
    }
    # Attribute -> wire-key/type map consumed by the msrest (de)serializer.
    _attribute_map = {
        'immutability_period_since_creation_in_days': {'key': 'properties.immutabilityPeriodSinceCreationInDays', 'type': 'int'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'update_history': {'key': 'updateHistory', 'type': '[UpdateHistoryProperty]'},
    }
    def __init__(self, *, immutability_period_since_creation_in_days: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.immutability_period_since_creation_in_days = immutability_period_since_creation_in_days
        # Read-only, server-populated fields start out unset.
        self.state = None
        self.etag = None
        self.update_history = None
| [
"[email protected]"
] | |
8c492766c5f8adb62877bbbcc99d29864d40fc45 | 5a29fbaa46a71eff0ac677b42e393b449e313085 | /upsea/Ea_11_Dma_pg_01/EA/Analyzer.py | 2ce51633ca7eb8fbc4a8352f2e2861911de253d8 | [
"MIT"
] | permissive | UpSea/PyAlgoTradeMid | 548d181d5d18448f75f205214e9d19b7356a5730 | c8edcbc089d92dbfbb8bb25af92a039146f6c6da | refs/heads/master | 2021-01-20T19:57:21.406976 | 2016-07-25T17:23:00 | 2016-07-25T17:23:00 | 62,429,518 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,460 | py | import numpy as np
import matplotlib.dates as mpd
import sys,os
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,os.pardir,os.pardir,'thirdParty','pyqtgraph-0.9.10'))
sys.path.append(xpower)
import pyqtgraph as pg
xpower = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'BaseClass'))
sys.path.append(xpower)
from midBaseAnalyzer import midBaseAnalyzer as midBaseAnalyzer
class Analyzer(midBaseAnalyzer):
    """Plots the DMA strategy's EMA indicator curves and buy/sell arrow
    markers onto pyqtgraph axes, reading from self.results (a DataFrame
    produced by the backtest -- assumed to have a datetime index and
    'long_ema'/'short_ema'/'buy'/'sell' columns; TODO confirm upstream).
    """
    #----------------------------------------------------------------------
    def indicatorsPlot(self, ax):
        """Plot the long/short EMA columns (when present) against dates."""
        dates = np.array([mpd.date2num(d) for d in self.results.index])
        if 'long_ema' in self.results and 'short_ema' in self.results:
            ax.plot(dates, self.results['long_ema'])
            ax.plot(dates, self.results['short_ema'])

    def signalPlot(self, ax, yBuy=None, ySell=None):
        """Draw buy (red, down-pointing) and sell (green, up-pointing)
        arrows at signal positions.  If yBuy/ySell are not supplied, the
        y-coordinates default to the long EMA at each signal row.
        """
        if 'buy' not in self.results or 'sell' not in self.results:
            return
        # Fix: was `yBuy == None`, which raises on numpy-array arguments
        # (ambiguous elementwise truth value); use identity checks instead.
        if (yBuy is None or ySell is None) and 'long_ema' in self.results:
            yBuy = np.array(self.results['long_ema'][self.results.buy])
            ySell = np.array(self.results['long_ema'][self.results.sell])
        if yBuy is None and ySell is None:
            return
        if 'long_ema' not in self.results:
            return
        # Fix: DataFrame.ix was deprecated and removed from pandas; .loc
        # performs the same boolean-mask row selection.
        xBuy = np.array([mpd.date2num(d) for d in self.results.loc[self.results.buy].index])
        for x1, y1 in zip(xBuy, yBuy):
            arrow = pg.ArrowItem(angle=90, tipAngle=60, headLen=5, tailLen=0, tailWidth=5, pen={'color': 'r', 'width': 1})
            ax.addItem(arrow)
            arrow.setPos(x1, y1)
        xSell = np.array([mpd.date2num(d) for d in self.results.loc[self.results.sell].index])
        for x1, y1 in zip(xSell, ySell):
            arrow = pg.ArrowItem(angle=-90, tipAngle=60, headLen=5, tailLen=0, tailWidth=5, pen={'color': 'g', 'width': 1})
            ax.addItem(arrow)
            arrow.setPos(x1, y1)
| [
"[email protected]"
] | |
3e799491be2198eeecb6afab23a3bc4df7ac236a | d785e993ed65049c82607a1482b45bddb2a03dda | /nano2017/cfg_fr_2018/ZZTo4L_ext2_cfg.py | ada0a293740e2508a83fed849b413d5fd23bc72b | [] | no_license | PKUHEPEWK/ssww | eec02ad7650014646e1bcb0e8787cf1514aaceca | a507a289935b51b8abf819b1b4b05476a05720dc | refs/heads/master | 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | from WMCore.Configuration import Configuration
# CRAB3 job configuration: NanoAOD fake-rate (fr) processing of the 2018
# ZZ -> 4l (ext2) Autumn18 sample, 20 input files per job.
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
# NOTE(review): the 'config' imported above is immediately shadowed by the
# fresh Configuration() below -- presumably intentional; verify.
config = Configuration()
config.section_("General")
config.General.requestName = 'ZZTo4L_ext2_2018'
config.General.transferLogs= False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
# Wrapper script plus auxiliary files shipped with every job.
config.JobType.scriptExe = 'crab_script_fr_2018.sh'
config.JobType.inputFiles = ['crab_script_fr_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/ZZTo4L_TuneCP5_13TeV_powheg_pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16_ext2-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
# 20 files per job, over the entire dataset (-1 = no unit limit).
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano_fr_2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'ZZTo4L_ext2_2018'
config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
| [
"[email protected]"
] | |
f74eb99bfc1bda4bca7cabb334cbd30400f2bc04 | 73e53e16fc1557447ac8b6d280d916adaa36c846 | /server | e15c737999e7404d2c732e48fad8b6e4ab1df6f6 | [] | no_license | apkallum/monadical.com | abc92cdd6ce49c7d6024df710ec67de102c787ed | 7b8fa76072ad0eeae2cb515591b345ce29a64dd6 | refs/heads/master | 2020-08-24T18:53:32.470134 | 2019-10-22T03:42:32 | 2019-10-22T03:42:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | #!/usr/bin/env python3
import sys
import json
from datetime import datetime
from flask import Flask, render_template, redirect
### Config
app = Flask(__name__)
CONFIG_FILE = 'content.json'
HOST = 'http://127.0.0.1:5000'
def load_config(fname=CONFIG_FILE):
    """Parse the JSON content file at *fname* and return it as a dict."""
    with open(fname, 'r') as config_file:
        return json.load(config_file)
CONFIG = load_config(CONFIG_FILE)
PAGES = {page['url']: page for page in list(CONFIG['PAGES'].values())} # {url: {page_data}}
POSTS = {post['url']: post for post in list(CONFIG['POSTS'].values())} # {url: {post_data}}
### Routes
# Similar to wordpress, pages and posts are separate. Every page has its own template
# in templates/page.html, but all posts use the same template + an iframe URL for the
# post content
@app.route('/')
def index():
    """Send the bare domain root to the static landing page."""
    landing = "/index.html"
    return redirect(landing)
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon by redirecting into the static directory."""
    icon_path = "/static/favicon.ico"
    return redirect(icon_path)
@app.route('/<path>')
def render_page(path):
    """Render a top-level page: look up its PAGES entry by URL and feed
    the entry (plus global config) into that page's own template."""
    entry = PAGES[f'/{path}']
    template_name = entry['template']
    return render_template(template_name, now=datetime.now(), **CONFIG, **entry)
@app.route('/posts/<path>')
def render_post(path):
    """Render a blog post; all posts share the post.html template."""
    print(path)  # simple request trace on stdout
    entry = POSTS[f'/posts/{path}']
    return render_template('post.html', now=datetime.now(), **CONFIG, **entry)
if __name__ == '__main__':
    # CLI: --pages / --posts print the full URL lists; otherwise serve HTTP.
    mode = sys.argv[1] if len(sys.argv) > 1 else None
    if mode == '--pages':
        # just print list of page urls
        print('\n'.join(HOST + url for url in PAGES))
    elif mode == '--posts':
        # just print list of post urls
        print('\n'.join(HOST + url for url in POSTS))
    else:
        # run the flask http server
        app.run()
| [
"[email protected]"
] | ||
5f05a3951089f1baf3863c3630cf00d923676bdb | 37f1563cdacf4b37b5b927b892538218aae79c77 | /hard/array/firstMissingPositive.py | b4c1f73b2ee27e1d1e11c3720b64cf11a4bd523c | [] | no_license | unsortedtosorted/elgoog | 9dee49a20f981305910a8924d86e8f2a16fe14c2 | 5be9fab24c0c1fd9d5dc7a7bdaca105f1ca873ee | refs/heads/master | 2020-04-15T00:51:12.114249 | 2019-05-19T04:37:24 | 2019-05-19T04:37:24 | 164,254,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 709 | py | """
41. First Missing Positive
Runtime : O(N)
"""
class Solution(object):
    """LeetCode 41 -- First Missing Positive, via in-place cyclic sort."""

    def firstMissingPositive(self, nums):
        """Return the smallest positive integer absent from *nums*.

        O(n) time, O(1) extra space.  Reorders *nums* in place so that,
        where possible, value v ends up at index v - 1; the first index
        whose slot does not hold its own value exposes the answer.

        :type nums: List[int]
        :rtype: int
        """
        if not nums:
            return 1
        size = len(nums)
        for idx in range(size):
            # Keep swapping the value at idx toward its home slot while it
            # is an in-range positive that is not already placed.
            while nums[idx] != idx + 1 and 0 < nums[idx] <= size:
                home = nums[idx] - 1
                nums[idx], nums[home] = nums[home], nums[idx]
                if nums[idx] == nums[home]:
                    break  # duplicate value: swapping again would loop forever
        for idx in range(size):
            if nums[idx] != idx + 1:
                return idx + 1
        return size + 1
| [
"[email protected]"
] | |
55412c60ad3960f8a8780d3ffcf2369eac11a1b9 | 98c86ee65aac21c8363f627f99b9da3acd777b35 | /Actividades en Clases/Actividad 04/Solución/botella.py | 43df3345b727c6c4c291d9e75fc5811d85f464a2 | [] | no_license | bcsaldias/syllabus | ef7e5eff0c8fc1ab5a28d12cc3f18ae998ad5c52 | ce30d74fc62861c3464301b5277ca68545209371 | refs/heads/master | 2021-01-24T01:11:22.739918 | 2015-03-20T00:24:54 | 2015-03-20T00:24:54 | 32,551,385 | 1 | 0 | null | 2015-03-19T23:20:50 | 2015-03-19T23:20:50 | null | UTF-8 | Python | false | false | 350 | py | __author__ = 'patricio_lopez'
class Botella:
    """Simple drink-bottle model (class activity exercise)."""

    def __init__(self, litros=1):
        # Capacity in liters; defaults to a one-liter bottle.
        self.litros = litros

    @property
    def etiqueta(self):
        """Brand label shown on every bottle."""
        return "DCC-Cola"

    def beber(self):
        """Print a tasting message for this bottle's label."""
        print(f"Deliciosa bebida {self.etiqueta}")

    def __str__(self):
        return f"{self.etiqueta} de {self.litros} litros."
| [
"[email protected]"
] | |
cbe81a3493a79fc65b094d0b27ab6eec20764273 | 638929e3a47b9ea8c0cc98336edca104c6af5e3a | /lib_catalog/catalog/migrations/0001_initial.py | 172abe90b7e0935c09374bc69fe83df2b3708d7c | [] | no_license | burbaljaka/lib_catalog | 190e944c798c8d80685c5c9a65b663fa116f5404 | 15e971b6d17dfc8f01959ba538b304969c0f51a9 | refs/heads/master | 2023-06-01T06:39:47.841908 | 2022-05-17T19:52:46 | 2022-05-17T19:52:46 | 217,097,504 | 0 | 1 | null | 2023-05-07T02:35:21 | 2019-10-23T15:57:50 | JavaScript | UTF-8 | Python | false | false | 3,038 | py | # Generated by Django 3.0.5 on 2020-05-02 10:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the 'catalog' app (Django 3.0.5).

    The lookup tables (Author, BBK, IssueCity, KeyWord, PublishingHouse)
    are created first; Book is created last because it references all of
    them via foreign keys / many-to-many fields.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('author_code', models.CharField(blank=True, max_length=200, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='BBK',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=20)),
                ('description', models.CharField(blank=True, max_length=2000, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='IssueCity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='KeyWord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='PublishingHouse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        # Book references every lookup table above; all relations are
        # optional (blank/null) with SET_NULL on delete.
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('issue_year', models.IntegerField(blank=True, null=True)),
                ('description', models.CharField(blank=True, max_length=2000, null=True)),
                ('place', models.CharField(blank=True, max_length=200, null=True)),
                ('pages', models.IntegerField(blank=True, null=True)),
                ('author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.Author')),
                ('bbk', models.ManyToManyField(blank=True, to='catalog.BBK')),
                ('issue_city', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.IssueCity')),
                ('keywords', models.ManyToManyField(blank=True, to='catalog.KeyWord')),
                ('publishing_house', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='catalog.PublishingHouse')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
c4e9905bd883b4712ef399d33cc6ee6c5c84f343 | c04d4692492e898573c69a9a90ac071b291a3840 | /keys.py | 187fa2f831ecf6c0cd12eab3f6fe35060a85bd78 | [] | no_license | namhyun-gu/SmuLibraryAppEngine | 1c698e21561a3a3df2d2d12c360aea7375a5b33a | 36b2d8e6057b26fb96a543a4c4ff529da9d8a5e4 | refs/heads/master | 2021-01-18T02:28:52.302652 | 2016-04-04T07:09:35 | 2016-04-04T07:09:35 | 37,923,529 | 0 | 0 | null | 2020-06-06T14:54:32 | 2015-06-23T14:21:45 | Python | UTF-8 | Python | false | false | 120 | py | #!/usr/bin/env python
__author__ = 'namhyun'
# memcache key
# Cache key for the full room list; per-room entries use the separate
# 'room_%d_cache' pattern below, formatted with the room number.
ROOM_LIST_KEY = 'roomlist_cache'
ROOM_KEY = 'room_%d_cache' | [
"[email protected]"
] | |
7e125b5f5c7032b1a6311ab010c2cd68bed0d063 | 4fdb8e90ab2bed9bc534155806314d4b6d0047ae | /database/migrations/0009_auto_20150224_1925.py | e56dc8666a88493c29a8c0aee5569d0da248cc57 | [] | no_license | gbriones1/MODB | 8ca04df5bc665d5b3dcc3a4f89fa167b21047d7d | b2aa15efe155a1e813917c720107c33cb56eef1b | refs/heads/master | 2021-01-18T23:21:16.545638 | 2016-07-28T15:42:37 | 2016-07-28T15:42:37 | 32,994,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: renames Lending_Product.lending_reg to
    'lending', adds Product.stock (integer, default 0), and makes
    Product.is_used a boolean defaulting to False.
    """
    dependencies = [
        ('database', '0008_auto_20150223_1141'),
    ]
    operations = [
        migrations.RenameField(
            model_name='lending_product',
            old_name='lending_reg',
            new_name='lending',
        ),
        migrations.AddField(
            model_name='product',
            name='stock',
            field=models.IntegerField(default=0),
            # One-off default: 0 backfills existing rows but is not kept
            # as the model-level default.
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='product',
            name='is_used',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
| [
"[email protected]"
] | |
1236b634dc1a2d71b4f154a6a96f2a43ec0635f3 | cdd6e64cb8a60156157eca1812224cb38036e0c6 | /5일차/9655 돌게임/hwan9655.py | 4076b7b60d3999d5f444390d6b078db290ff3e4e | [] | no_license | kkbwilldo/boostcamp | abb0371f28a98b78a8e254021433680cf9bfdab0 | 47090c261c0ca1e3eca503c8332640d5721e7750 | refs/heads/master | 2023-04-02T00:02:24.834732 | 2021-03-31T07:53:50 | 2021-03-31T07:53:50 | 341,049,036 | 0 | 0 | null | 2021-03-02T12:58:58 | 2021-02-22T01:45:38 | Python | UTF-8 | Python | false | false | 102 | py | from sys import stdin
# BOJ 9655 stone game: the code prints "SK" (first player wins) for odd N
# and "CY" otherwise.
N = int(stdin.readline())
if N % 2 == 1:
    print("SK")
else:
print("CY") | [
"[email protected]"
] | |
64ac34971a4420b2371d118512fb9cd91ef116ce | 5a9464a9d1543a072adf9c0dc07cbf3d3f5e5207 | /src/examples_sensor_tests.py | 45016d5ba083c576414ced1228d1329e54f55497 | [] | no_license | moyersjm/rosebotics2 | e4f30196fd153a3ef86de4157cbfef31b6622096 | 15a008fba44b770540ea943fe495629a368af560 | refs/heads/master | 2020-04-02T13:45:37.339009 | 2018-11-14T19:43:53 | 2018-11-14T19:43:53 | 154,495,978 | 1 | 1 | null | 2018-10-24T12:20:01 | 2018-10-24T12:20:01 | null | UTF-8 | Python | false | false | 2,216 | py | """
Capstone Project. Code for testing basics.
Author: David Mutchler, based on work by Dave Fisher and others.
Fall term, 2018-2019.
"""
import rosebotics_even_newer as rb
import time
def main():
    """ Runs tests. """
    # Delegates straight to the interactive sensor-printing loop below.
    run_test_sensors()
def run_test_sensors():
    """ Print sensor values each time the user presses the ENTER key. """
    # NOTE(review): rb.Snatch3rRobot() presumably requires the physical
    # robot hardware to be attached -- confirm before running off-robot.
    robot = rb.Snatch3rRobot()
    # Each iteration polls every sensor once, then blocks on input();
    # typing "q" (then ENTER) exits the loop.
    while True:
        print()
        print("Touch sensor (value, is_pressed):",
              robot.touch_sensor.get_value(),
              robot.touch_sensor.is_pressed())
        print("Color sensor (reflected intensity, color):",
              robot.color_sensor.get_reflected_intensity(),
              robot.color_sensor.get_color())
        print("Camera:", robot.camera.get_biggest_blob())
        print("Brick buttons:",
              robot.brick_button_sensor.is_back_button_pressed(),
              robot.brick_button_sensor.is_top_button_pressed(),
              robot.brick_button_sensor.is_bottom_button_pressed(),
              robot.brick_button_sensor.is_left_button_pressed(),
              robot.brick_button_sensor.is_right_button_pressed())
        # ----------------------------------------------------------------------
        # On each run, use just ONE of the following 3 sensors:
        # ----------------------------------------------------------------------
        print("Proximity sensor (inches):",
              robot.proximity_sensor.get_distance_to_nearest_object_in_inches())
        # print("Beacon sensor (cm, degrees):",
        #       robot.beacon_sensor.get_distance_to_beacon(),
        #       robot.beacon_sensor.get_heading_to_beacon())
        # print("Beacon button sensor (top/bottom red, top/bottom blue):",
        #       robot.beacon_button_sensor.is_top_red_button_pressed(),
        #       robot.beacon_button_sensor.is_bottom_red_button_pressed(),
        #       robot.beacon_button_sensor.is_top_blue_button_pressed(),
        #       robot.beacon_button_sensor.is_bottom_blue_button_pressed())
        character = input(
            "Press the ENTER (return) key to get next sensor reading, or q to quit: ")
        if character == "q":
            break
# Fix: main() was called unconditionally, so merely importing this module
# started the interactive hardware loop.  Behavior when run as a script
# (python examples_sensor_tests.py) is unchanged.
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
54448b37275c1c6533fe3de3b724a8161ddad67e | caceb60f71165772b6d6155f619e79189e7c80a9 | /第一期/上海-棒棒糖/第二次任务-每日代码练习/2017-1/1-24/str__test.py | 92856b7febfee9c2c466704f18d9f9ea1ffca57c | [
"Apache-2.0"
] | permissive | beidou9313/deeptest | ff41999bb3eb5081cdc8d7523587d7bc11be5fea | e046cdd35bd63e9430416ea6954b1aaef4bc50d5 | refs/heads/master | 2021-04-26T23:06:08.890071 | 2019-04-03T02:18:44 | 2019-04-03T02:18:44 | 123,931,080 | 0 | 0 | Apache-2.0 | 2018-03-05T14:25:54 | 2018-03-05T14:25:53 | null | UTF-8 | Python | false | false | 186 | py | class Student(object):
    def __init__(self, name):
        # Display name; __str__ below returns it verbatim.
        self.name = name
    def __str__(self):
        # print(Student(...)) therefore shows just the name.
        return self.name
# __repr__ = __str__
#
# Build a demo Student and print it (print() goes through __str__).
s = Student('Michael')
print(s)
| [
"[email protected]"
] | |
6dc60740658b89808ba69fe241f7f9dd670cacca | 2acf2f926441eadb1c32879bfa6f0e800055b9d9 | /oblig6/gaussian_white_noise.py | 475e30a109b61bf14bcbcf5cec905bcee957d390 | [] | no_license | Linueks/fys2130 | 26b400bbf878ef56d26fdc618f85b62a44515eff | 761bef68476cb210266758ea00e17020e417a174 | refs/heads/main | 2023-02-24T10:05:33.951748 | 2021-01-23T13:23:34 | 2021-01-23T13:23:34 | 332,214,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | '''
Program that generates Gaussian white noise.
Each frequency component is generated randomly b/w 0 and
the a value assigne by Normal distribution.
Each phase component is generated randombly b/w 0 and 2pi.
The finished signal is Fourier transformed.
Sebastian G. Winther-Larsen (2017)
'''
import numpy as np
from matplotlib import pyplot as plt
def gaussian_white_noise(f_sample, N, f_center, f_width):
    """Generate band-limited noise with a Gaussian spectral envelope.

    A random spectrum is built: each frequency bin gets an amplitude drawn
    uniformly between 0 and a Gaussian envelope centered at ``f_center``
    (half-width ``f_width / 2``), and a random phase in [0, 2*pi).  The
    upper half of the spectrum is then overwritten with the complex
    conjugate of the lower half so the inverse FFT is real.

    Parameters
    ----------
    f_sample : sampling frequency in Hz
    N        : number of samples / FFT bins
    f_center : center frequency of the Gaussian envelope (Hz)
    f_width  : full width of the envelope (Hz)

    Returns
    -------
    y : length-N complex spectrum (Hermitian-symmetric, zero DC bin)
    q : real time-domain signal, ifft(y) scaled by 200
    """
    # Parameters and necessary arrays.  (Dead locals T, t, n_center and
    # n_sigma from the original were removed: nothing consumed them.)
    f_sigma = f_width / 2
    y = np.zeros(N, 'complex')
    f = np.linspace(0, f_sample*(N-1)/N, N)
    # Random amplitude under the Gaussian envelope, with random phase.
    gauss = np.exp(-(f - f_center) * (f - f_center) / (f_sigma*f_sigma))
    amplitude = np.random.rand(N) * gauss
    phase = np.random.rand(N) * 2*np.pi
    y.real = amplitude * np.cos(phase)
    y.imag = amplitude * np.sin(phase)
    # Mirror the lower half into the upper half (conjugate symmetry) so
    # the inverse transform comes out purely real.
    n_half = np.round(N/2)
    for i in range(int(n_half - 1)):
        y[int(N - i - 1)] = np.conjugate(y[int(i + 1)])
    y[int(n_half)] = y[int(n_half)].real  # Nyquist bin must be real
    y[0] = 0.0  # no DC component
    q = np.real(np.fft.ifft(y)*200)
    return y, q
if __name__ == '__main__':
    # Demo: 500 Hz-wide noise band centered at 5 kHz, CD sample rate.
    spectrum, signal = gaussian_white_noise(44100, 2**16, 5000, 500)
    #plt.plot(spectrum.real)
    plt.plot(signal)
    plt.show()
| [
"[email protected]"
] | |
9faa3bfbaffa598e35f9d61fdffcf2ea69476498 | c0d9e2b2135956031bbad6abef22be5a205696db | /src/Inc/Import.py | af20358268dbff2b4c09395c2ae747c47d55b783 | [] | no_license | VladVons/py-relay | 22758805b796c23546c97f8f42c664a2ff1b4fba | c57c205c49b7bbb6a91c98ec326b02a36c3daaef | refs/heads/master | 2023-02-05T22:15:40.884753 | 2019-03-08T06:48:07 | 2019-03-08T06:48:07 | 127,041,974 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | '''
Copyright: (c) 2017, Vladimir Vons
Author: Vladimir Vons <[email protected]>
Created: 2017.10.20
License: GNU, see LICENSE for more details
Description:
Import = TDynImport()
Import.ParseDir('Plugin/Devices')
Import.GetInstance(ClassName)
TClass = Import.GetInstance(ClassName)
Result = TClass(None)
'''
import os
import sys
import re
#
from Inc.Log import Log
# nuitka --module Manager.py
# from XXX import *
class TDynImport():
    """Registry mapping class names to the module/path they were found in,
    built by scanning source trees, and used to resolve plugin classes by
    name at runtime (see the module docstring for usage)."""

    def __init__(self):
        self.Clear()

    def Clear(self):
        """Reset the registry to empty."""
        self.Classes = {}

    def AddClass(self, aClassName, aModule, aPath = './'):
        """Register aClassName as defined in module aModule under aPath.

        Also prepends aPath to sys.path so the module is importable.
        Raises Exception if the class name is already registered.
        """
        Data = self.Classes.get(aClassName)
        if (Data):
            Msg = Log.Print(1, 'e', self.__class__.__name__, 'AddClass()', 'Class %s already exists in' % aClassName, Data)
            raise Exception(Msg)
        self.Classes[aClassName] = {'Module':aModule, 'Path': aPath}
        if (aPath not in sys.path):
            sys.path.insert(0, aPath)

    def ParseDir(self, aDir = '.'):
        """Walk aDir and register every `class Xxx(` found in *.py files."""
        for Root, Dirs, Files in os.walk(aDir):
            for File in Files:
                FilePath = Root + '/' + File
                FileName, FileExt = os.path.splitext(File)
                if (FileExt == '.py'):
                    # 'with' guarantees the handle is closed on any error
                    # (the original leaked it if readlines() raised).
                    with open(FilePath, "r") as hFile:
                        Lines = hFile.readlines()
                    for Line in Lines:
                        if ('class ' in Line):
                            # Raw string avoids the invalid '\s' escape warning.
                            Data = re.search(r'(class\s+)(.+)\(', Line)
                            if (Data):
                                ClassName = Data.group(2)
                                self.AddClass(ClassName, FileName, Root)

    def GetAttr(self, aClassName, aModuleName):
        """Import aModuleName and return its attribute aClassName."""
        Module = __import__(aModuleName)
        Result = getattr(Module, aClassName)
        return Result

    def GetInstance(self, aClassName, aModuleName = ''):
        """Resolve a class object by name: from an explicit module, from
        the registry, or from this module's globals, in that order."""
        if (aModuleName):
            # Bug fix: the original called self.FromModule(), which is not
            # defined anywhere in this class (AttributeError).  GetAttr()
            # has exactly the intended (name, module) signature.
            Result = self.GetAttr(aClassName, aModuleName)
        elif (aClassName in self.Classes):
            Module = self.Classes[aClassName]['Module']
            Result = self.GetAttr(aClassName, Module)
        else:
            Result = globals()[aClassName]
        return Result
| [
"[email protected]"
] | |
b1128e67e55a4bf39f2b5f933b18923292afc875 | 14bad9231bef3ae79ba7b0c3837b34f6c89bc49b | /128.py | d3aa82690555de9a5610c6c30e95a0f891ec5564 | [] | no_license | Nagajothikp/Nagajothi | 80dc36d167a1143c203c594d25d9dc31892bd239 | 0d80c40c47d315cbcbc2d43bb2d77887d17414d4 | refs/heads/master | 2022-01-25T18:58:24.173042 | 2019-08-07T06:36:11 | 2019-08-07T06:36:11 | 194,424,732 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 36 | py | x=input("Enter a number")
print(x)
| [
"[email protected]"
] | |
b3b0f9e662f65e09d8aae750a859c059a09e7cb8 | 0ea22107790ef695ad80ddba9d6a6c1ae95e7c6e | /kalibr-cde/cde-root/opt/ros/hydro/lib/python2.7/dist-packages/geometry_msgs/msg/_PointStamped.py | b28032463564931641d9984243a9a26ef4912b22 | [] | no_license | wangrui996/camera_imu_calibration | 0f9bc0cf737641b352fa71ae9710c735da69a732 | 4296aeac1001f21502355d8ca98d4ae214e30ffc | refs/heads/main | 2023-06-19T15:19:42.618423 | 2021-07-15T06:52:20 | 2021-07-15T06:52:20 | 386,193,412 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,270 | py | """autogenerated by genpy from geometry_msgs/PointStamped.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import std_msgs.msg
class PointStamped(genpy.Message):
  # Autogenerated ROS message wrapper (genpy).  The metadata below is
  # derived from geometry_msgs/PointStamped.msg and must stay in sync
  # with it -- regenerate rather than hand-edit.
  _md5sum = "c63aecb41bfdfd6b7e1fac37c7cbe7bf"
  _type = "geometry_msgs/PointStamped"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """# This represents a Point with reference coordinate frame and timestamp
Header header
Point point
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
"""
  __slots__ = ['header','point']
  _slot_types = ['std_msgs/Header','geometry_msgs/Point']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       header,point
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(PointStamped, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.point is None:
        self.point = geometry_msgs.msg.Point()
    else:
      # No arguments at all: build fully-defaulted sub-messages.
      self.header = std_msgs.msg.Header()
      self.point = geometry_msgs.msg.Point()
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self
      # Header: seq + stamp.secs + stamp.nsecs as three little-endian uint32.
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      # Strings go on the wire length-prefixed (uint32) then raw UTF-8 bytes.
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      # Point payload: x, y, z as three little-endian float64.
      buff.write(_struct_3d.pack(_x.point.x, _x.point.y, _x.point.z))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.point is None:
        self.point = geometry_msgs.msg.Point()
      end = 0
      _x = self
      start = end
      end += 12
      # 12 bytes: header seq + stamp (three uint32).
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 24
      # 24 bytes: point x, y, z (three float64).
      (_x.point.x, _x.point.y, _x.point.z,) = _struct_3d.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # Identical to serialize() for this message: it has no array fields,
    # so the numpy module argument is unused.
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3d.pack(_x.point.x, _x.point.y, _x.point.z))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Identical to deserialize(): no array fields, numpy unused.
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.point is None:
        self.point = geometry_msgs.msg.Point()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 24
      (_x.point.x, _x.point.y, _x.point.z,) = _struct_3d.unpack(str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
# Pre-compiled packers shared by the (de)serializers above:
# three little-endian uint32 (header seq + stamp) and three float64 (x,y,z).
_struct_3I = struct.Struct("<3I")
_struct_3d = struct.Struct("<3d")
| [
"[email protected]"
] | |
5714dbdd0d286db2b3c543a6e67af447e969ff56 | 7780f59da5cac72501b4f9b0bb0b96e8f3ded6e1 | /tests/test_cinema.py | 446816a08120ffe33fed7e2b9150514842fff8fb | [] | no_license | kimbugp/movie-bookings | 7286593b10897c27d936650e538e84e2cbd12791 | 63121b88c6e022fcb849ff7ab8da7be9844f391a | refs/heads/master | 2022-12-10T01:41:01.648098 | 2020-01-21T08:36:17 | 2020-01-21T08:36:17 | 199,335,252 | 0 | 2 | null | 2022-12-08T03:17:06 | 2019-07-28T20:43:15 | Python | UTF-8 | Python | false | false | 4,209 | py | import json
from .basetest import BaseTestCase
class TestCinema(BaseTestCase):
    """API tests for creating/listing cinema halls at /api/v1/cinema."""
    def test_create_cinema_fails_with_no_authentication(self, test_client):
        data = json.dumps({})
        response = test_client.post(
            "/api/v1/cinema",
            data=data,
            headers={"Content-Type": "application/json"},
        )
        self.assertEqual(response.status_code, 401)
    def test_create_cinema_with_no_permissions_fails(self, test_client):
        response = test_client.post("/api/v1/cinema")
        self.assertEqual(response.status_code, 401)
    def test_get_cinema(self, test_client, auth_header):
        response = test_client.get("/api/v1/cinema", headers=auth_header)
        self.assertEqual(response.status_code, 200)
    def test_create_show_time_fails_with_cinema_hall_already_filled(
        self, test_client, auth_header, cinema
    ):
        # Re-posting the same cinema payload must be rejected.
        _, data = cinema
        response = test_client.post(
            "/api/v1/cinema", data=data, headers=auth_header
        )
        assert response.status_code == 400
    # NOTE(review): missing the 'test_' prefix, so pytest never collects
    # this method -- rename if it is meant to run (the hard-coded id 4
    # suggests it may also be order-dependent; confirm fixtures first).
    def create_cinema_succeeds(self, cinema):
        response, data = cinema
        self.assertEqual(response.status_code, 201)
        self.assertEqual(
            response.json,
            {
                "seats": [
                    {"name": "A", "number": [1, 2]},
                    {"name": "B", "number": [1, 2]},
                ],
                "id": 4,
                "name": "Simon Peter",
                "description": "sdfgd",
            },
        )
class TestUpdateCinema(BaseTestCase):
    """API tests for updating a cinema hall via PUT /api/v1/cinema/<id>."""
    def test_update_cinema_by_id_succeeds(
        self, test_client, cinema, auth_header
    ):
        # Replace the hall's seats with rows C and D, seats 1-2 each.
        data = json.dumps(
            {
                "seats": [
                    {"name": "C", "number": [1, 2]},
                    {"name": "D", "number": [1, 2]},
                ]
            }
        )
        response = test_client.put(
            "/api/v1/cinema/1", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            response.json,
            {
                "cinema": {
                    "id": 1,
                    "name": "Cinema1",
                    "description": "SOme data",
                    "seats": [
                        {
                            "id": 12,
                            "name": "C",
                            "number": "1",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 13,
                            "name": "C",
                            "number": "2",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 14,
                            "name": "D",
                            "number": "1",
                            "cinema_hall": 1,
                        },
                        {
                            "id": 15,
                            "name": "D",
                            "number": "2",
                            "cinema_hall": 1,
                        },
                    ],
                }
            },
        )
    def test_update_cinema_by_id_fails(self, test_client, cinema, auth_header):
        # Unknown cinema id -> 404.
        _, data = cinema
        response = test_client.put(
            "/api/v1/cinema/100", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 404)
    # NOTE(review): 'wth' looks like a typo for 'with' in the test name.
    def test_update_cinema_by_id_fails_wth_same_seats(
        self, test_client, auth_header
    ):
        # Duplicate seat rows must surface the DB uniqueness error as 400.
        data = json.dumps(
            {
                "seats": [
                    {"name": "A", "number": [1, 2]},
                    {"name": "B", "number": [1, 2]},
                ]
            }
        )
        response = test_client.put(
            "/api/v1/cinema/4", data=data, headers=auth_header
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json,
            {
                "error": " Key (name, cinema_hall, number)=(A, 4, 1) already exists.\n",
                "message": "",
            },
        )
| [
"[email protected]"
] | |
9fb1b2820e595de7d35630b709b9417006ff18af | 431c7b40b9bb283dde761b4748fe742a70941762 | /textress/account/tests/test_views.py | bf9088f3324b66071ed0ab8724e9e0bbd276f05d | [] | no_license | aaronlelevier/textress_project | 0b01960664eff9c83703f08cf0e33177b3369236 | 927519fec52f6df54d81e393597b55b3755103dd | refs/heads/master | 2021-06-01T04:42:21.783010 | 2016-07-15T13:42:39 | 2016-07-15T13:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,731 | py | import datetime
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Group
from model_mommy import mommy
from account.forms import AcctCostForm
from account.models import (AcctStmt, TransType, AcctTrans, AcctCost,
Pricing, CHARGE_AMOUNTS, BALANCE_AMOUNTS)
from account.tests.factory import (create_acct_stmts, create_acct_stmt,
create_acct_trans, CREATE_ACCTCOST_DICT)
from main.models import Hotel
from main.tests.factory import (create_hotel, create_hotel_user, make_subaccount,
make_subaccount_live, CREATE_USER_DICT, CREATE_HOTEL_DICT, PASSWORD)
from payment.models import Customer
from sms.models import PhoneNumber
from utils import create, login_messages, alert_messages
class AccountTests(TestCase):
    """End-to-end view tests for login/logout/account pages.

    setUp builds one hotel with an admin, a manager and a plain user, plus a
    Twilio phone number and a (mocked via model_mommy) Stripe customer, so the
    account dashboard renders without alerts by default.
    """
    # Test Rending of view, template path is correct, url
    # User of each permission type needed
    def setUp(self):
        create._get_groups_and_perms()
        self.password = PASSWORD
        self.hotel = create_hotel()
        self.admin = create_hotel_user(self.hotel, 'admin', 'hotel_admin')
        self.manager = create_hotel_user(self.hotel, 'manager', 'hotel_manager')
        self.user = create_hotel_user(self.hotel, 'user')
        self.ph = mommy.make(PhoneNumber, hotel=self.hotel)
        self.hotel.update_twilio_phone(self.ph.sid, self.ph.phone_number)
        self.customer = mommy.make(Customer)
        self.hotel.update_customer(self.customer)
    # private
    def test_private(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('private'), follow=True)
        self.assertRedirects(response, reverse('account'))
        m = list(response.context['messages'])
        self.assertEqual(len(m), 1)
        self.assertEqual(str(m[0]), login_messages['now_logged_in'])
    def test_private__logged_out(self):
        # anonymous users are bounced to login with ?next= preserved
        response = self.client.get(reverse('private'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'), reverse('private')))
    # logout
    def test_logout(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('logout'), follow=True)
        self.assertRedirects(response, reverse('login'))
        m = list(response.context['messages'])
        self.assertEqual(len(m), 1)
        self.assertEqual(str(m[0]), login_messages['now_logged_out'])
    def test_logout__while_logged_out(self):
        response = self.client.get(reverse('logout'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'), reverse('logout')))
    # login
    def test_login_get(self):
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['form'])
    def test_login_post(self):
        data = {'username': self.user.username, 'password': PASSWORD}
        response = self.client.post(reverse('login'), data, follow=True)
        self.assertRedirects(response, reverse('account'))
        m = list(response.context['messages'])
        self.assertEqual(len(m), 1)
        self.assertEqual(str(m[0]), login_messages['now_logged_in'])
    # account
    def test_account__logged_in(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('account'))
        self.assertEqual(response.status_code, 200)
    def test_account__logged_out(self):
        response = self.client.get(reverse('account'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'), reverse('account')))
    def test_account__headline_context(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('account'))
        self.assertTrue(response.context['headline_small'])
    def test_account__no_funds_alert(self):
        # deactivating the Twilio subaccount should surface the "no funds" alert
        self.client.login(username=self.user.username, password=self.password)
        subaccount = make_subaccount(self.hotel)
        subaccount.active = False
        subaccount.save()
        self.assertFalse(self.hotel.subaccount.active)
        response = self.client.get(reverse('account'))
        self.assertTrue(response.context['alerts'])
        self.assertIn(
            alert_messages['no_funds_alert'],
            response.content
        )
    def test_account__no_customer_alert(self):
        # removing the Stripe customer should surface the "no customer" alert
        self.client.login(username=self.user.username, password=self.password)
        self.hotel.customer = None
        self.hotel.save()
        self.assertIsNone(self.hotel.customer)
        response = self.client.get(reverse('account'))
        self.assertTrue(response.context['alerts'])
        self.assertIn(
            alert_messages['no_customer_alert'],
            response.content
        )
    def test_account__no_twilio_phone_number_alert(self):
        # hotels with no Twilio phone SID get the phone-number alert
        self.client.login(username=self.user.username, password=self.password)
        self.hotel.twilio_ph_sid = None
        self.hotel.save()
        self.assertIsNone(self.hotel.twilio_ph_sid)
        response = self.client.get(reverse('account'))
        self.assertTrue(response.context['alerts'])
        self.assertIn(
            alert_messages['no_twilio_phone_number_alert'],
            response.content
        )
    # account - navbar links
    def test_account__login_navbar_links(self):
        self.client.login(username=self.admin.username, password=PASSWORD)
        response = self.client.get(reverse('account'))
        self.assertIn("My Profile", response.content)
        self.assertIn(reverse('main:user_detail', kwargs={'pk': self.admin.pk}), response.content)
        self.assertIn("My Guests", response.content)
        self.assertIn(reverse('concierge:guest_list'), response.content)
        self.assertIn("Logout", response.content)
        self.assertIn(reverse('logout'), response.content)
    # account - side-bar links
    def test_account__side_bar_links__admin(self):
        # admins see every sidebar section, including billing and phone numbers
        self.client.login(username=self.admin.username, password=PASSWORD)
        response = self.client.get(reverse('account'))
        # User
        self.assertIn("My Profile", response.content)
        self.assertIn(reverse('main:user_detail', kwargs={'pk': self.admin.pk}), response.content)
        self.assertIn("Change Password", response.content)
        self.assertIn(reverse('password_change'), response.content)
        # Hotel
        self.assertIn("Hotel Info", response.content)
        self.assertIn(reverse('main:hotel_update', kwargs={'pk': self.admin.profile.hotel.pk}), response.content)
        # Guests
        self.assertIn("Guest List", response.content)
        self.assertIn(reverse('concierge:guest_list'), response.content)
        self.assertIn("Add a Guest", response.content)
        self.assertIn(reverse('concierge:guest_create'), response.content)
        # Users
        self.assertIn("Manage Users", response.content)
        self.assertIn("User List", response.content)
        self.assertIn(reverse('main:manage_user_list'), response.content)
        self.assertIn("Add a User", response.content)
        self.assertIn(reverse('main:create_user'), response.content)
        self.assertIn("Add a Manager", response.content)
        self.assertIn(reverse('main:create_manager'), response.content)
        # Auto-Replies
        self.assertIn("Auto Replies", response.content)
        self.assertIn(reverse('concierge:replies'), response.content)
        # PhoneNumbers
        self.assertIn("Phone Numbers List", response.content)
        self.assertIn(reverse('sms:ph_num_list'), response.content)
        self.assertIn("Add a Phone Number", response.content)
        self.assertIn(reverse('sms:ph_num_add'), response.content)
        # Billing
        self.assertIn("Overview", response.content)
        self.assertIn(reverse('payment:summary'), response.content)
        self.assertIn("Account Payment Settings", response.content)
        self.assertIn(reverse('acct_cost_update', kwargs={'pk': self.admin.profile.hotel.pk}), response.content)
        self.assertIn("Change / Add Payment Method", response.content)
        self.assertIn(reverse('payment:card_list'), response.content)
        self.assertIn("Add Funds", response.content)
        self.assertIn(reverse('payment:one_time_payment'), response.content)
        self.assertIn("View Payment History", response.content)
        self.assertIn(reverse('acct_pmt_history'), response.content)
    def test_account__side_bar_links__manager(self):
        # managers can manage users/guests but get no hotel, phone or billing links
        self.client.login(username=self.manager.username, password=PASSWORD)
        response = self.client.get(reverse('account'))
        # User
        self.assertIn("My Profile", response.content)
        self.assertIn(reverse('main:user_detail', kwargs={'pk': self.manager.pk}), response.content)
        self.assertIn("Change Password", response.content)
        self.assertIn(reverse('password_change'), response.content)
        # Hotel
        self.assertNotIn("Hotel Info", response.content)
        self.assertNotIn(reverse('main:hotel_update', kwargs={'pk': self.manager.profile.hotel.pk}), response.content)
        # Guests
        self.assertIn("Guest List", response.content)
        self.assertIn(reverse('concierge:guest_list'), response.content)
        self.assertIn("Add a Guest", response.content)
        self.assertIn(reverse('concierge:guest_create'), response.content)
        # Users
        self.assertIn("Manage Users", response.content)
        self.assertIn("User List", response.content)
        self.assertIn(reverse('main:manage_user_list'), response.content)
        self.assertIn("Add a User", response.content)
        self.assertIn(reverse('main:create_user'), response.content)
        self.assertNotIn("Add a Manager", response.content)
        self.assertNotIn(reverse('main:create_manager'), response.content)
        # Auto-Replies
        self.assertIn("Auto Replies", response.content)
        self.assertIn(reverse('concierge:replies'), response.content)
        # PhoneNumbers
        self.assertNotIn("Phone Numbers List", response.content)
        self.assertNotIn(reverse('sms:ph_num_list'), response.content)
        self.assertNotIn("Add a Phone Number", response.content)
        self.assertNotIn(reverse('sms:ph_num_add'), response.content)
        # Billing
        self.assertNotIn("Overview", response.content)
        self.assertNotIn(reverse('payment:summary'), response.content)
        self.assertNotIn("Account Payment Settings", response.content)
        self.assertNotIn(reverse('acct_cost_update', kwargs={'pk': self.manager.profile.hotel.pk}), response.content)
        self.assertNotIn("Change / Add Payment Method", response.content)
        self.assertNotIn(reverse('payment:card_list'), response.content)
        self.assertNotIn("Add Funds", response.content)
        self.assertNotIn(reverse('payment:one_time_payment'), response.content)
        self.assertNotIn("View Payment History", response.content)
        self.assertNotIn(reverse('acct_pmt_history'), response.content)
    def test_account__side_bar_links__user(self):
        # plain users only get profile, password and guest links
        self.client.login(username=self.user.username, password=PASSWORD)
        response = self.client.get(reverse('account'))
        # User
        self.assertIn("My Profile", response.content)
        self.assertIn(reverse('main:user_detail', kwargs={'pk': self.user.pk}), response.content)
        self.assertIn("Change Password", response.content)
        self.assertIn(reverse('password_change'), response.content)
        # Hotel
        self.assertNotIn("Hotel Info", response.content)
        self.assertNotIn(reverse('main:hotel_update', kwargs={'pk': self.user.profile.hotel.pk}), response.content)
        # Guests
        self.assertIn("Guest List", response.content)
        self.assertIn(reverse('concierge:guest_list'), response.content)
        self.assertIn("Add a Guest", response.content)
        self.assertIn(reverse('concierge:guest_create'), response.content)
        # Users
        self.assertNotIn("Manage Users", response.content)
        self.assertNotIn("User List", response.content)
        self.assertNotIn(reverse('main:manage_user_list'), response.content)
        self.assertNotIn("Add a User", response.content)
        self.assertNotIn(reverse('main:create_user'), response.content)
        self.assertNotIn("Add a Manager", response.content)
        self.assertNotIn(reverse('main:create_manager'), response.content)
        # Auto-Replies
        self.assertNotIn("Auto Replies", response.content)
        self.assertNotIn(reverse('concierge:replies'), response.content)
        # PhoneNumbers
        self.assertNotIn("Phone Numbers List", response.content)
        self.assertNotIn(reverse('sms:ph_num_list'), response.content)
        self.assertNotIn("Add a Phone Number", response.content)
        self.assertNotIn(reverse('sms:ph_num_add'), response.content)
        # Billing
        self.assertNotIn("Overview", response.content)
        self.assertNotIn(reverse('payment:summary'), response.content)
        self.assertNotIn("Account Payment Settings", response.content)
        self.assertNotIn(reverse('acct_cost_update', kwargs={'pk': self.user.profile.hotel.pk}), response.content)
        self.assertNotIn("Change / Add Payment Method", response.content)
        self.assertNotIn(reverse('payment:card_list'), response.content)
        self.assertNotIn("Add Funds", response.content)
        self.assertNotIn(reverse('payment:one_time_payment'), response.content)
        self.assertNotIn("View Payment History", response.content)
        self.assertNotIn(reverse('acct_pmt_history'), response.content)
    ### inherit from - django.contrib.auth.forms
    ### 2 views for password change
    def test_password_change(self):
        # login required view
        response = self.client.get(reverse('password_change'))
        self.assertEqual(response.status_code, 302)
        # login
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('password_change'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['form'])
    def test_password_change_done(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('password_change_done'))
        self.assertEqual(response.status_code, 200)
    def test_password_change_done__logged_out(self):
        # login required view
        response = self.client.get(reverse('password_change_done'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'),
            reverse('password_change_done')))
    ### 4 views for password reset
    def test_password_reset(self):
        response = self.client.get(reverse('password_reset'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['form'])
        self.assertTrue(response.context['headline'])
    def test_password_reset_done(self):
        response = self.client.get(reverse('password_reset_done'))
        self.assertEqual(response.status_code, 200)
    def test_password_reset_confirm(self):
        # TODO: write an integration for Form test for this
        pass
    def test_password_reset_complete(self):
        response = self.client.get(reverse('password_reset_complete'))
        self.assertEqual(response.status_code, 200)
class RegistrationTests(TestCase):
    """Registration step 3 (AcctCost create/update) view tests.

    A hotel admin is logged in for every test; the create view must redirect
    to the update view once an AcctCost exists, and update views must refuse
    access to another hotel's AcctCost.
    """
    def setUp(self):
        create._get_groups_and_perms()
        self.hotel = create_hotel()
        self.user = create_hotel_user(self.hotel, group="hotel_admin")
        # Login
        self.client.login(username=self.user.username, password=PASSWORD)
    # register_step3
    def test_get(self):
        response = self.client.get(reverse('register_step3'))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context['form'], AcctCostForm)
    def test_get__logged_out(self):
        self.client.logout()
        response = self.client.get(reverse('register_step3'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'), reverse('register_step3')))
    def test_create(self):
        # Step 3
        response = self.client.post(reverse('register_step3'),
            CREATE_ACCTCOST_DICT, follow=True)
        self.assertRedirects(response, reverse('payment:register_step4'))
        # created n linked to Hotel
        acct_cost = AcctCost.objects.get(hotel=self.hotel)
        self.assertIsInstance(acct_cost, AcctCost)
        # Dave tries to view the page again and is redirected to the UpdateView
        response = self.client.get(reverse('register_step3'), follow=True)
        self.assertRedirects(response, reverse('register_step3_update', kwargs={'pk': acct_cost.pk}))
    def test_update(self):
        # Step 3 UpdateView
        # Dave wants to update his choice
        acct_cost = mommy.make(AcctCost, hotel=self.hotel)
        response = self.client.get(reverse('register_step3_update', kwargs={'pk': acct_cost.pk}))
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.context['form'], AcctCostForm)
    def test_update__logged_out(self):
        # Step 3 UpdateView
        # Dave wants to update his choice
        self.client.logout()
        acct_cost = mommy.make(AcctCost, hotel=self.hotel)
        response = self.client.get(reverse('register_step3_update', kwargs={'pk': acct_cost.pk}),
            follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'), reverse('register_step3')))
    def test_update__no_account_cost(self):
        # Dave doesn't have an AcctCost yet, and tries to go to another Hotel's AcctCost page
        other_hotel = create_hotel()
        other_acct_cost = mommy.make(AcctCost, hotel=other_hotel)
        response = self.client.get(reverse('register_step3_update',
            kwargs={'pk': other_acct_cost.pk}), follow=True)
        self.assertRedirects(response, reverse('register_step3'))
    def test_update__other_hotel_account_cost(self):
        # Dave has an AcctCost, and tries to go to another Hotel's AcctCost page
        acct_cost = mommy.make(AcctCost, hotel=self.hotel)
        other_hotel = create_hotel()
        other_acct_cost = mommy.make(AcctCost, hotel=other_hotel)
        response = self.client.get(reverse('register_step3_update',
            kwargs={'pk': other_acct_cost.pk}), follow=True)
        self.assertRedirects(response, reverse('register_step3_update',
            kwargs={'pk': acct_cost.pk}))
class AcctStmtAndOtherAccountViewTests(TestCase):
    """AcctCost update, AcctStmt detail and payment-history view tests.

    setUp seeds a second hotel with its own statements/transactions to prove
    queries are scoped to the logged-in admin's hotel only.
    """
    fixtures = ['trans_type.json']
    def setUp(self):
        self.hotel = create_hotel()
        create._get_groups_and_perms()
        self.admin = create_hotel_user(hotel=self.hotel, username='admin', group='hotel_admin')
        # dates
        self.today = datetime.datetime.today()
        self.year = self.today.year
        self.month = self.today.month
        # Account Data
        self.pricing = mommy.make(Pricing, hotel=self.hotel)
        self.acct_trans = create_acct_trans(hotel=self.hotel)
        self.acct_stmt = AcctStmt.objects.get_or_create(hotel=self.hotel, year=self.year, month=self.month)
        # Login
        self.client.login(username=self.admin.username, password=PASSWORD)
        # Create other Hotel to show that the 1st main Hotel is not affected
        # and all views / queries return the expected results
        self.hotel_2 = create_hotel()
        self.acct_stmt = create_acct_stmt(hotel=self.hotel_2, year=self.year, month=self.month)
        self.acct_trans = create_acct_trans(hotel=self.hotel_2)
    def tearDown(self):
        self.client.logout()
    ### ACCT COST
    def test_acct_cost_update__get(self):
        acct_cost, created = AcctCost.objects.get_or_create(self.hotel)
        response = self.client.get(reverse('acct_cost_update', kwargs={'pk':acct_cost.pk}))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.context['form'])
        self.assertTrue(response.context['breadcrumbs'])
    def test_acct_cost_update__get__logged_out(self):
        self.client.logout()
        acct_cost, created = AcctCost.objects.get_or_create(self.hotel)
        response = self.client.get(reverse('acct_cost_update', kwargs={'pk':acct_cost.pk}),
            follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'),
            reverse('acct_cost_update', kwargs={'pk':acct_cost.pk})))
    def test_acct_cost_update_post(self):
        # valid POST saves and redirects to the payment summary
        data = {
            'balance_min': BALANCE_AMOUNTS[0][0],
            'recharge_amt': CHARGE_AMOUNTS[0][0],
            'auto_recharge': True
        }
        acct_cost, created = AcctCost.objects.get_or_create(self.hotel)
        response = self.client.post(reverse('acct_cost_update', kwargs={'pk':acct_cost.pk}),
            data, follow=True)
        self.assertRedirects(response, reverse('payment:summary'))
        # success message from ``FormUpdateMessageMixin``
        m = list(response.context['messages'])
        self.assertEqual(len(m), 1)
    ### ACCT STMT DETAIL
    def test_acct_stmt_detail__response(self):
        response = self.client.get(reverse('acct_stmt_detail',
            kwargs={'year': self.year, 'month': self.month}))
        self.assertEqual(response.status_code, 200)
    def test_acct_stmt_detail__logged_out(self):
        self.client.logout()
        response = self.client.get(reverse('acct_stmt_detail',
            kwargs={'year': self.year, 'month': self.month}), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'),
            reverse('acct_stmt_detail', kwargs={'year': self.year, 'month': self.month})))
    def test_acct_stmt_detail__context(self):
        response = self.client.get(reverse('acct_stmt_detail',
            kwargs={'year': self.year, 'month': self.month}))
        self.assertTrue(response.context['acct_stmt'])
        self.assertTrue(response.context['acct_stmts'])
        for ea in ['sms_used', 'phone_number']:
            self.assertIn(ea, response.context['debit_trans_types'])
    def test_acct_stmt_detail_breadcrumbs(self):
        response = self.client.get(reverse('acct_stmt_detail',
            kwargs={'year': self.year, 'month': self.month}))
        self.assertTrue(response.context['breadcrumbs'])
    def test_context_acct_trans(self):
        # amounts/balances are stored in cents, hence the /100.0 below
        acct_tran = AcctTrans.objects.filter(hotel=self.hotel, trans_type__name='sms_used').first()
        self.assertTrue(acct_tran)
        response = self.client.get(reverse('acct_stmt_detail',
            kwargs={'year': self.year, 'month': self.month}))
        self.assertTrue(response.context['monthly_trans'])
        # the "sms_used" AcctTrans is populating the table how we expect
        self.assertIn(acct_tran.insert_date.strftime("%B %-d, %Y"), response.content)
        self.assertIn("sms used", response.content)
        self.assertIn(str(acct_tran.sms_used), response.content)
        self.assertIn('${:.2f}'.format(acct_tran.amount/100.0), response.content)
        self.assertIn('${:.2f}'.format(acct_tran.balance/100.0), response.content)
    ### ACCT PMT HISTORY
    def test_acct_pmt_history__response(self):
        response = self.client.get(reverse('acct_pmt_history'))
        self.assertEqual(response.status_code, 200)
    def test_acct_pmt_history__logged_out(self):
        self.client.logout()
        response = self.client.get(reverse('acct_pmt_history'), follow=True)
        self.assertRedirects(response, "{}?next={}".format(reverse('login'),
            reverse('acct_pmt_history')))
    def test_acct_pmt_history__context(self):
        response = self.client.get(reverse('acct_pmt_history'))
        self.assertTrue(response.context['object_list'])
    def test_acct_pmt_history__breadcrumbs(self):
        response = self.client.get(reverse('acct_pmt_history'))
        self.assertTrue(response.context['breadcrumbs'])
    def test_acct_pmt_history__context_record(self):
        acct_tran = AcctTrans.objects.filter(hotel=self.hotel, trans_type__name='init_amt').first()
        self.assertTrue(acct_tran)
        response = self.client.get(reverse('acct_pmt_history'))
        self.assertIn(acct_tran.insert_date.strftime("%B %-d, %Y"), response.content)
        self.assertIn("init amt", response.content)
        self.assertIn('${:.2f}'.format(acct_tran.amount/100.0), response.content)
class APITests(TestCase):
    """Read-only smoke tests for the public pricing API endpoints."""
    def setUp(self):
        # One hotel with an attached pricing record backs both endpoints.
        self.hotel = create_hotel()
        self.pricing = mommy.make(Pricing, hotel=self.hotel)
    def test_pricing(self):
        # The list endpoint responds OK for anonymous requests.
        list_response = self.client.get(reverse('api_pricing'))
        self.assertEqual(200, list_response.status_code)
    def test_pricing_get_indiv(self):
        # The detail endpoint responds OK for an existing record.
        pricing_record = Pricing.objects.first()
        detail_url = reverse('api_pricing', kwargs={'pk': pricing_record.pk})
        self.assertEqual(200, self.client.get(detail_url).status_code)
class AccountDeactivatedTests(TestCase):
    """Account-page warning banners for deactivated/unfunded accounts.

    WARNING: ``make_subaccount_live`` talks to the real Twilio API, so these
    tests hit the network; tearDown re-activates the live subaccount.
    """
    # Test Rending of view, template path is correct, url
    # User of each permission type needed
    def setUp(self):
        create._get_groups_and_perms()
        self.password = PASSWORD
        self.hotel = create_hotel()
        self.admin = create_hotel_user(self.hotel, 'admin', 'hotel_admin')
        self.manager = create_hotel_user(self.hotel, 'manager', 'hotel_manager')
        self.user = create_hotel_user(self.hotel, 'user')
        # Subaccount
        self.sub = make_subaccount_live(self.hotel)
        # Login
        self.client.login(username=self.user.username, password=self.password)
    def tearDown(self):
        # set back to "active" b/c this is a live Twilio Subaccount
        self.assertEqual(self.sub.activate(), 'active')
    # subaccount - warnings
    def test_active_subaccount_no_warning_message(self):
        self.assertTrue(self.hotel.subaccount.active)
        response = self.client.get(reverse('account'))
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(
            "SMS sending and receiving has been deactivated",
            response.content
        )
    def test_deactivated_subaccount_shows_warning_message(self):
        self.sub.deactivate()
        response = self.client.get(reverse('account'))
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "SMS sending and receiving has been deactivated",
            response.content
        )
    # not stripe customer - warnings
    def test_no_stripe_customer_warning(self):
        # no Customer was attached in setUp, so the funds warning must show
        self.assertIsNone(self.hotel.customer)
        response = self.client.get(reverse('account'))
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "No account funds. Click the link to add initial funds and avoid the account being deactivated.",
            response.content
        )
    def test_no_stripe_customer_warning__not_present(self):
        # attaching a Customer removes the funds warning
        customer = mommy.make(Customer)
        self.hotel = self.hotel.update_customer(customer)
        self.assertIsNotNone(self.hotel.customer)
        response = self.client.get(reverse('account'))
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(
            "No account funds. Click the link to add initial funds and avoid the account being deactivated.",
            response.content
        )
class LoginTests(TestCase):
    """The login form renders for an anonymous visitor."""
    def setUp(self):
        # A plain Django user; no hotel fixtures are needed here.
        self.password = '1111'
        self.user = User.objects.create_user('Bobby',
            settings.DEFAULT_FROM_EMAIL, self.password)
    def test_get(self):
        login_page = self.client.get(reverse('login'))
        self.assertEqual(login_page.status_code, 200)
        self.assertTrue(login_page.context['form'])
class PasswordChangeTests(TestCase):
    """The password-change views render for an authenticated user."""
    def setUp(self):
        # Credentials for a plain Django user.
        self.password = '1111'
        self.new_password = '2222'
        self.user = User.objects.create_user('Bobby',
            settings.DEFAULT_FROM_EMAIL, self.password)
    def _login(self):
        # Both password-change views require an authenticated session.
        self.client.login(username=self.user.username, password=self.password)
    def test_get(self):
        self._login()
        change_page = self.client.get(reverse('password_change'))
        self.assertEqual(change_page.status_code, 200)
        self.assertTrue(change_page.context['form'])
    def test_get_done(self):
        self._login()
        done_page = self.client.get(reverse('password_change_done'))
        self.assertEqual(done_page.status_code, 200)
class PasswordResetTests(TestCase):
    """Smoke-tests the four password-reset views.

    BUGFIX: the last two tests were both named ``test_get_done``, so the
    second definition shadowed the first and the ``password_reset_done``
    view was never exercised. The second test is renamed
    ``test_get_complete`` so both views are covered.
    """
    def setUp(self):
        # Credentials for a plain Django user.
        self.password = '1111'
        self.new_password = '2222'
        self.user = User.objects.create_user('Bobby',
            settings.DEFAULT_FROM_EMAIL, self.password)
    def test_get(self):
        # Anonymous users may request a password reset.
        response = self.client.get(reverse('password_reset'))
        assert response.status_code == 200
        assert response.context['form']
    def test_get_done(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('password_reset_done'))
        assert response.status_code == 200
    def test_get_complete(self):
        self.client.login(username=self.user.username, password=self.password)
        response = self.client.get(reverse('password_reset_complete'))
        assert response.status_code == 200
| [
"[email protected]"
] | |
d3f3032c4627dd131772f1abe4d43e3da33a3083 | cfd4cc45dc558eba3c45797dbb5335e51a90b581 | /gopython3/core/tests/test_unit.py | 02e68291594bb6c1efdd15444a5a57d4ed507f59 | [
"MIT"
] | permissive | futurecolors/gopython3 | 8724ae4e458e156b82161d6b4083ac7c5a4f3eeb | cfff51f86edb962bba2a51c8f7691454af54809d | refs/heads/dev | 2020-05-18T21:50:11.638433 | 2014-01-21T18:28:01 | 2014-01-21T18:28:01 | 12,966,604 | 1 | 0 | null | 2014-05-17T08:38:23 | 2013-09-20T05:52:19 | Python | UTF-8 | Python | false | false | 4,991 | py | # coding: utf-8
from unittest.mock import patch
from django.utils import timezone
import warnings
from collections import namedtuple
from django.test import TestCase
from ..factories import SpecFactory, JobFactory
from ..models import Job, Package, Spec
from ..tasks import query_pypi
def fake_distributions(*distributions):
    """Build lightweight distribution stand-ins from pinned requirement strings.

    Each argument is a ``'name==version'`` string; the return value is a list
    of ``(name, version)`` namedtuples, one per argument, in order.
    """
    Distribution = namedtuple('Distribution', ['name', 'version'])
    pairs = (spec.split('==') for spec in distributions)
    return [Distribution(name, version) for name, version in pairs]
def fake_requirement(name, specs):
    """Mimic a pip ``Requirement`` carrying *name* and *specs* and no extras."""
    Requirement = namedtuple('Requirement', ['name', 'specs', 'extras'])
    return Requirement(name=name, specs=specs, extras=[])
class JobTest(TestCase):
    """Job.objects.create_from_requirements(): parsing a requirements.txt blob."""
    def setUp(self):
        # Raw requirements.txt content. This is a *string literal*: the '#'
        # lines below are data the parser must skip, not Python comments, and
        # the '-r' include is intentionally unresolvable.
        self.reqs_txt = """
        -r some_missing_file
        django>=1.4,<1.5
        Django-Geoip==0.3
        # tests below
        coverage
        coveralls>0.2
        # TODO: VCS
        """
    def test_can_be_created_from_requirements_txt(self):
        with warnings.catch_warnings():
            # We're ignoring -r not being parsed
            # "Recursive requirements not supported. Skipping."
            warnings.simplefilter("ignore", category=UserWarning)
            job = Job.objects.create_from_requirements(self.reqs_txt)
        assert job.requirements == self.reqs_txt
        # only the four concrete requirement lines survive parsing
        assert list(map(str, job.lines.all().order_by('pk'))) == [
            'django>=1.4,<1.5',
            'Django-Geoip==0.3',
            'coverage',
            'coveralls>0.2']
class JobStatusTest(TestCase):
    """Job.status state machine.

    A job is 'pending' while any line is unparsed, 'running' while any spec
    is unfinished, and 'success' once all lines are parsed and all specs done.
    """
    def test_completed_if_no_specs_no_lines(self):
        job = JobFactory()
        assert job.status == 'success', 'No specs, no lines'
    def test_pending_if_unparsed_lines(self):
        job = JobFactory(lines=['spanish=42,inquisition==7'])
        assert job.status == 'pending', 'It has 2 unparsed lines'
    def test_pending_if_pending_specs(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        assert job.status == 'running', 'It has 2 unfinished specs, but lines are parsed'
    def test_running_if_running_and_finished_specs(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        spec = job.specs.first()
        spec.status = 'running'
        spec.save()
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'running', 'Job has started, but has not finished yet'
    def test_running_if_one_spec_pending(self):
        # NOTE(review): the name and the assert message say "pending", but all
        # specs are set to 'success' and the assertion expects 'success' —
        # looks like a stale name/message copied from another case; confirm.
        job = JobFactory(specs=['foo=1,bar==2'])
        job.specs.all().update(status='success')
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'success', 'One spec pending'
    def test_running_if_finished_and_pending_specs(self):
        job = JobFactory(specs=['steve==1', 'jobs==2'])
        spec = job.specs.first()
        spec.status = 'finished'
        spec.save()
        assert job.status == 'running', 'One spec has finished, but 1 line is not parsed yet'
    def test_completed_if_specs_completed(self):
        job = JobFactory(specs=['foo=1,bar==2'])
        job.specs.all().update(status='success')
        job = Job.objects.get(pk=job.pk)
        assert job.status == 'success', 'All specs have finished'
class JobSpecTest(TestCase):
    """JobLine.set_distribution(): linking parsed lines to Package/Spec rows."""
    def test_process_requirement(self):
        # a fresh distribution creates both the Package and the Spec
        job = JobFactory(lines=['Django==1.5.4'])
        package, package_created, spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('Django==1.5.4'))
        assert list(map(str, Package.objects.all())) == ['Django']
        assert list(map(str, job.specs.all())) == ['Django==1.5.4']
        assert package_created
        assert spec_created
    def test_does_not_create_duplicate_specs(self):
        # a pre-existing Package/Spec pair must be reused, not duplicated
        spec = SpecFactory(version='0.2.19', package__name='lettuce')
        job = JobFactory(lines=['lettuce==0.2.19'])
        same_package, package_created, same_spec, spec_created = job.lines.all()[0].set_distribution(*fake_distributions('lettuce==0.2.19'))
        assert not package_created
        assert not spec_created
        assert Spec.objects.count() == 1
        assert Package.objects.count() == 1
        assert job.specs.all().first().version == spec.version
        assert job.specs.all().first().package.name == spec.package.name
        assert spec.pk == same_spec.pk
        assert same_package.pk == same_spec.package.pk
class PypiTaskTest(TestCase):
    """query_pypi task: returns PyPI metadata and persists it on the Spec."""
    @patch('api.PyPI.get_info')
    def test_updates_spec(self, get_info_mock):
        # stub the network call to PyPI with a canned payload
        last_release_date = timezone.now()
        py3_versions = ['3', '3.2', '3.3']
        get_info_mock.return_value = {
            'last_release_date': last_release_date,
            'py3_versions': py3_versions,
        }
        spec = SpecFactory(version='0.2.19', package__name='lettuce')
        assert query_pypi(spec.pk) == get_info_mock.return_value
        # the task must also write the payload back onto the Spec row
        spec = Spec.objects.get(pk=spec.pk)
        assert spec.release_date == last_release_date
        assert spec.python_versions == py3_versions
| [
"[email protected]"
] | |
675b01dcde98168800671ad211778faa2ce9b622 | 5b9ac627bf39b01917f75d18d8ca83211a04c718 | /cahoots/confidence/normalizers/character.py | eb3454f1cbed230ef870e1d916a85b2295aff56f | [
"MIT"
] | permissive | SerenitySoftware/cahoots | dbfa109e4c65d20ef01c2d97d3087e7a8aede838 | 866336c51436343ff5e56f83f89dddc82a5693a3 | refs/heads/master | 2021-05-28T20:00:32.827485 | 2015-08-23T00:31:11 | 2015-08-23T00:31:11 | 21,884,335 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | """
The MIT License (MIT)
Copyright (c) Serenity Software, LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from cahoots.confidence.normalizers.base import BaseNormalizer
class CharacterWithoutBoolean(BaseNormalizer):
    """Boosts Character confidence when no Boolean result is present."""

    @staticmethod
    def test(types, _):
        """
        Decide whether this normalizer applies to the result set.

        :param types: list of result types
        :type types: list
        :param _: list of result types + subtypes (unused here)
        :type _: list
        :return: True when a Character result exists without a Boolean one
        :rtype: bool
        """
        has_character = 'Character' in types
        has_boolean = 'Boolean' in types
        return has_character and not has_boolean

    @staticmethod
    def normalize(results):
        """
        Raise every Character result to full (100%) confidence.

        :param results: list of results to normalize
        :type results: list
        :return: the normalized results
        :rtype: list
        """
        for candidate in results:
            if candidate.type == 'Character':
                candidate.confidence = 100
        return results
| [
"[email protected]"
] | |
fcc28c4fb295fc56e743a94f929317d9aac54d4f | 085ce75a507df6e755cabb7a65c4a2a8c98762ba | /dockerfiles/root/.pycharm_helpers/python_stubs/-252567642/_yaml/__init__/SequenceNode.py | 2cc52374a86f60654ef8c8168c3539260e1b19ab | [] | no_license | Arhzi/habr-docker-article | d44302db1fe157d81fe0818e762e82218f50e31f | 6fb094860b612e307beadaeb22981aa0ee64e964 | refs/heads/master | 2021-01-23T20:41:47.398025 | 2015-12-10T08:56:33 | 2015-12-10T08:56:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | # encoding: utf-8
# module _yaml
# from /usr/local/lib/python2.7/site-packages/_yaml.so
# by generator 1.137
# no doc
# imports
import yaml as yaml # /usr/local/lib/python2.7/site-packages/yaml/__init__.pyc
import __builtin__ as __builtins__ # <module '__builtin__' (built-in)>
import yaml.error as __yaml_error
import yaml.events as __yaml_events
import yaml.nodes as __yaml_nodes
import yaml.tokens as __yaml_tokens
class SequenceNode(__yaml_nodes.CollectionNode):
    # Auto-generated IDE stub for the C-accelerated `_yaml` extension.
    # Represents a YAML sequence (list) node in the representation graph.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass

    # YAML node kind tag used by the (de)serializer.
    id = 'sequence'
| [
"[email protected]"
] | |
dbed30c53f73497797ddbe0811d3e196d96974ef | 288865b3b519222370b00cda04ffab96f46b046d | /dd/deal/urls.py | 96efada9c080d2357cb3a4f0720c588ba3262eac | [] | no_license | bcattle/dolores-deals | 9c49daefb83f35eff65262dd14d5756a06eea66f | d45914c9afbeca9dbd655eee5b8ba021b2e07760 | refs/heads/master | 2020-12-24T16:59:28.981973 | 2011-05-08T08:29:47 | 2011-05-08T08:29:47 | 1,445,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | from django.conf.urls.defaults import *
# URLconf for the deal app (legacy Django `patterns()` syntax; views are
# resolved relative to the 'deal.views' module).
urlpatterns = patterns('deal.views',
    # regex, view fxn, args, label for get_absolute_url()
    # /<city>/<neighborhood>/<deal>/ -> show_deal, reversible as 'deal_page'.
    (r'^(?P<city_slug>[-\w]+)/(?P<neighborhood_slug>[-\w]+)/(?P<deal_slug>[-\w]+)/$',
        'show_deal', { 'template_name': 'deal.html' }, 'deal_page'),
    # Site root falls back to the default deal.
    (r'^$', 'default_deal'),
)
"[email protected]"
] | |
41eeb3b1e3a7cf61c0e2b16ae63ce4c9826894f2 | d785e993ed65049c82607a1482b45bddb2a03dda | /loose/loose_SingleMuon_G_cfg.py | 46a90679fcd22958110855f1922ec226777629c0 | [] | no_license | PKUHEPEWK/ssww | eec02ad7650014646e1bcb0e8787cf1514aaceca | a507a289935b51b8abf819b1b4b05476a05720dc | refs/heads/master | 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
config.section_("General")
config.General.requestName = 'loose_SingleMuon_G'
config.General.transferLogs= True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script.sh'
config.JobType.inputFiles = ['crab_loose_data_script.py','ssww_keep_and_drop.txt','ssww_output_branch_selection.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/SingleMuon/Run2016G-Nano14Dec2018-v1/NANOAOD'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'LumiBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 60
config.Data.lumiMask = 'https://cms-service-dqm.web.cern.ch/cms-service-dqm/CAF/certification/Collisions16/13TeV/ReReco/Final/Cert_271036-284044_13TeV_23Sep2016ReReco_Collisions16_JSON.txt'
config.Data.outLFNDirBase = '/store/user/%s/nano2016' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'loose_SingleMuon_G'
config.section_("Site")
config.Site.storageSite = "T2_CH_CERNBOX"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
| [
"[email protected]"
] | |
5f8ee58ee1001869ac9653bf17b97fc00ea6d69b | c9952dcac5658940508ddc139344a7243a591c87 | /tests/lab09/test_ch09_t01_befor_we_begin.py | deb2a8cafff91ac4cfaf68983409b92dd410c622 | [] | no_license | wongcyrus/ite3101_introduction_to_programming | 5da1c15212528423b3df91997327fe148abef4de | 7cd76d0861d5355db5a6e2e171735bee2e78f829 | refs/heads/master | 2023-08-31T17:27:06.193049 | 2023-08-21T08:30:26 | 2023-08-21T08:30:26 | 136,574,036 | 3 | 2 | null | 2023-08-21T08:30:28 | 2018-06-08T06:06:49 | Python | UTF-8 | Python | false | false | 384 | py | import unittest
from tests.unit_test_helper.console_test_helper import *
class TestOutput(unittest.TestCase):
    """Verify the console output of the 'before we begin' lab script."""

    def test(self):
        # execfile returns (globals, locals, source text, captured stdout);
        # only the captured stdout is checked here.
        result = execfile("lab09/ch09_t01_befor_we_begin.py")
        printed = result[3]
        expected = """Adam
Alex
Mariah
Martine
Columbus
"""
        self.assertEqual(expected, printed)


if __name__ == '__main__':
    unittest.main()
| [
"[email protected]"
] | |
9bd90299eaa46d54955252ca6e3183a2d1ae3d21 | f72c689bd0d756b4817cc03cb434a228343c8936 | /test/functional/rpc_getchaintips.py | c71c2d08d2237c1b28f7c8b61f61971edd4ffe9d | [
"MIT"
] | permissive | CircuitProject/Circuit-Core | 7f68a8b4cb180a715cb24e247b899d8d8dc29e95 | 831dc33d57050ea2955983b2e8f1fc088a819e97 | refs/heads/main | 2023-04-09T00:08:37.954538 | 2021-04-12T19:09:42 | 2021-04-12T19:09:42 | 357,308,816 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,177 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the getchaintips RPC.
- introduce a network split
- work on chains of different lengths
- join the network together again
- verify that getchaintips now returns two chain tips.
"""
from test_framework.test_framework import CircuitTestFramework
from test_framework.util import assert_equal
class GetChainTipsTest(CircuitTestFramework):
    """Exercise the getchaintips RPC across a network split and rejoin."""

    def set_test_params(self):
        self.num_nodes = 4

    def run_test(self):
        # Before the split every node sees one active tip at height 200.
        tips = self.nodes[0].getchaintips()
        assert_equal(len(tips), 1)
        assert_equal(tips[0]['branchlen'], 0)
        assert_equal(tips[0]['height'], 200)
        assert_equal(tips[0]['status'], 'active')

        # Split the network and build two chains of different lengths.
        self.split_network()
        self.nodes[0].generate(10)
        self.nodes[2].generate(20)
        self.sync_all([self.nodes[:2], self.nodes[2:]])

        # First half of the split: a single active tip at height 210.
        tips = self.nodes[1].getchaintips()
        assert_equal(len(tips), 1)
        short_tip = tips[0]
        assert_equal(short_tip['branchlen'], 0)
        assert_equal(short_tip['height'], 210)
        assert_equal(tips[0]['status'], 'active')

        # Second half: a single active tip at height 220.
        tips = self.nodes[3].getchaintips()
        assert_equal(len(tips), 1)
        long_tip = tips[0]
        assert_equal(long_tip['branchlen'], 0)
        assert_equal(long_tip['height'], 220)
        assert_equal(tips[0]['status'], 'active')

        # Join the network halves and check that we now have two tips
        # (at least at the nodes that previously had the short chain).
        self.join_network()
        tips = self.nodes[0].getchaintips()
        assert_equal(len(tips), 2)
        assert_equal(tips[0], long_tip)
        assert_equal(tips[1]['branchlen'], 10)
        assert_equal(tips[1]['status'], 'valid-fork')
        # Normalizing the fork entry makes it comparable to the old tip.
        tips[1]['branchlen'] = 0
        tips[1]['status'] = 'active'
        assert_equal(tips[1], short_tip)


if __name__ == '__main__':
    GetChainTipsTest().main()
| [
"[email protected]"
] | |
5f6cb6c94ca4f16d8f6f26845918c9f4b4708db8 | 54d2887e3c910f68366bd0aab3c692d54245e22a | /abc/abc_042_125/abc067/c.py | b4ed060ee9fec1215b76ac1a828de13f9a096cb0 | [] | no_license | Kevinrobot34/atcoder | 7aec367fd2c6b589e9d583dae7b3c7520ce9fa12 | 482ea508f098f81e4f19522fe518dd22c781aca9 | refs/heads/master | 2022-07-10T23:44:45.290022 | 2022-06-29T11:30:26 | 2022-06-29T11:30:26 | 158,081,477 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | n = int(input())
a = list(map(int, input().split()))
s = sum(a)
s1 = 0
ans = 10**15
for i in range(n-1):
s1 += a[i]
ans = min(ans, abs(s1 - (s-s1)))
print(ans)
| [
"[email protected]"
] | |
59cbd00de94ea0b4a3b7608732f8325b879b67fe | 1ddbd4f7194fb52ea8344e8f80dcbd87e8d41cfc | /restconf/pagination.py | 67556c262f5a6216f1bf85d067790445fbcf1af1 | [] | no_license | TruthTheDeveloper/Huggie-backend | cc9444571a5e147e789c2dcfaae51a694d37d917 | 8a686beb2635557f4235047cde3eccd79d3ea3b7 | refs/heads/master | 2023-08-20T06:12:31.465811 | 2021-10-31T18:21:15 | 2021-10-31T18:21:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from rest_framework import pagination
class CFEAPIPagination(pagination.PageNumberPagination):
    """Page-number pagination used by the API: 10 items per page."""
    page_size = 10
    # Alternative limit/offset settings, kept commented for reference:
    # default_limit = 6
    # max_limit = 20
    # limit_query_param = 'lim'
"henrysempire111gmail.com"
] | henrysempire111gmail.com |
8ceb95bec95f8eb8abb7c04384ce7ca03d720ffd | 48faee5b845e43e6c102cb027f43c8b886ecaa5e | /utils/ansible_drive/test/ansibleApi_pbtest.py | 106e2ab5ed5e1582930d2804371155ee451f57a4 | [] | no_license | hornLK/LonedayAdmin | 66c0a8b978967a0144a216f621c872a6d2197229 | 36ba3fe763788423801ad5ab14462624114da804 | refs/heads/master | 2022-12-26T06:57:47.675915 | 2018-05-15T13:08:34 | 2018-05-15T13:08:34 | 131,375,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,149 | py | import json
from ansible import constants as C
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.utils.ssh_functions import check_for_controlpersist
from ansible.plugins.callback import CallbackBase
def inventory_list(url, api_key):
    """
    Fetch the host inventory from the CMDB API and return the host IPs
    as a single comma-separated string (ansible host-pattern style).

    The request is signed with a custom header: md5("<api_key>|<timestamp>")
    plus the timestamp itself, sent as "X-Http-Secretkey: <md5hex>|<ts>".

    :param url: inventory endpoint URL returning a JSON list of hosts
    :param api_key: shared secret used to sign the request
    :return: comma-joined host IPs, e.g. "1.2.3.4,5.6.7.8"
    """
    # Bug fix: time, hashlib and requests are used here but were never
    # imported anywhere in this module, so calling this function raised
    # NameError.  Import them locally (requests is already a de-facto
    # dependency of this code, not a new one).
    import time
    import hashlib
    import requests

    time_span = time.time()
    secret_data = "%s|%f" % (api_key, time_span)
    hash_obj = hashlib.md5(secret_data.encode("utf-8"))
    encryption = hash_obj.hexdigest()
    send_data = encryption + "|" + str(time_span)
    headers = {'content-type': 'application/json', "X-Http-Secretkey": send_data}
    res = requests.get(url, headers=headers)
    # The API returns a JSON list of host dicts; keep only their IPs.
    dict_json = [host.get("hostIP") for host in json.loads(res.text)]
    return ",".join(dict_json)
# --- module-level Ansible execution setup -----------------------------------


class ResultCallback():
    """Minimal callback that reports playbook start events.

    NOTE(review): this does not inherit from CallbackBase even though it is
    imported above -- presumably intentional for this driver; confirm.
    """

    def v2_playbook_on_start(self, playbook):
        # Debug output: show the playbook object and its attributes.
        print(playbook)
        print(dir(playbook))


loader = DataLoader()

api_key = "0a37511d-be7d-4fdd-ab17-28b6c659d763"
url = "http://192.168.220.3:8890/apiv1/auths/host/list/"

# Lightweight stand-in for ansible's parsed CLI options object.
Options = namedtuple('Options', ['connection', 'module_path', 'forks',
                                 'become', 'become_method', 'become_user',
                                 'check', 'diff'])
options = Options(connection='192.168.220.3',
                  module_path=['/path/to/mymodules'], forks=100, become=None,
                  become_method=None, become_user=None, check=False,
                  diff=False)

passwords = dict(vault_pass='123123')

# Bug fix: in the original file this instantiation appeared ABOVE the class
# definition, which raised NameError at import time.  The class must be
# defined before the instance is created.
results_callback = ResultCallback()
class LkPlaybookExecutor(PlaybookExecutor):
    # PlaybookExecutor subclass that lets callers inject a custom stdout
    # callback.  The attribute is stored BEFORE calling the parent
    # initializer because PlaybookExecutor.__init__ builds the task queue
    # manager, which may read self._stdout_callback.
    # NOTE(review): assumes the installed ansible version honours a
    # pre-set _stdout_callback attribute -- confirm against that version.
    def __init__(self,playbooks,inventory,variable_manager,loader,options,passwords,stdout_callback=None):
        self._stdout_callback=stdout_callback
        super(LkPlaybookExecutor,self).__init__(playbooks, inventory,
                                                variable_manager, loader,
                                                options, passwords)
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.