from datetime import datetime
from typing import Union
from yt_dlp.utils import sanitize_filename
from ytdl_sub.entries.base_entry import BaseEntry
from ytdl_sub.entries.base_entry import BaseEntryVariables
from ytdl_sub.entries.variables.kwargs import CHANNEL
from ytdl_sub.entries.variables.kwargs import CHANNEL_ID
from ytdl_sub.entries.variables.kwargs import CREATOR
from ytdl_sub.entries.variables.kwargs import DOWNLOAD_INDEX
from ytdl_sub.entries.variables.kwargs import EXT
from ytdl_sub.entries.variables.kwargs import PLAYLIST_COUNT
from ytdl_sub.entries.variables.kwargs import PLAYLIST_DESCRIPTION
from ytdl_sub.entries.variables.kwargs import PLAYLIST_INDEX
from ytdl_sub.entries.variables.kwargs import PLAYLIST_MAX_UPLOAD_YEAR
from ytdl_sub.entries.variables.kwargs import PLAYLIST_MAX_UPLOAD_YEAR_TRUNCATED
from ytdl_sub.entries.variables.kwargs import PLAYLIST_TITLE
from ytdl_sub.entries.variables.kwargs import PLAYLIST_UID
from ytdl_sub.entries.variables.kwargs import PLAYLIST_UPLOADER
from ytdl_sub.entries.variables.kwargs import PLAYLIST_UPLOADER_ID
from ytdl_sub.entries.variables.kwargs import PLAYLIST_UPLOADER_URL
from ytdl_sub.entries.variables.kwargs import PLAYLIST_WEBPAGE_URL
from ytdl_sub.entries.variables.kwargs import RELEASE_DATE
from ytdl_sub.entries.variables.kwargs import SOURCE_COUNT
from ytdl_sub.entries.variables.kwargs import SOURCE_DESCRIPTION
from ytdl_sub.entries.variables.kwargs import SOURCE_INDEX
from ytdl_sub.entries.variables.kwargs import SOURCE_TITLE
from ytdl_sub.entries.variables.kwargs import SOURCE_UID
from ytdl_sub.entries.variables.kwargs import SOURCE_UPLOADER
from ytdl_sub.entries.variables.kwargs import SOURCE_UPLOADER_ID
from ytdl_sub.entries.variables.kwargs import SOURCE_UPLOADER_URL
from ytdl_sub.entries.variables.kwargs import SOURCE_WEBPAGE_URL
from ytdl_sub.entries.variables.kwargs import UPLOAD_DATE
from ytdl_sub.entries.variables.kwargs import UPLOAD_DATE_INDEX
# This file contains mixins to a BaseEntry subclass. Ignore pylint's "no kwargs member" suggestion
# pylint: disable=no-member
# pylint: disable=too-many-public-methods
def pad(num: int, width: int = 2) -> str:
"""Pad an integer with leading zeros to the given width"""
return str(num).zfill(width)
_days_in_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
Self = Union[BaseEntry, "EntryVariables"]
class EntryVariables(BaseEntryVariables):
@property
def source_title(self: Self) -> str:
"""
Returns
-------
str
Name of the source (i.e. channel with multiple playlists) if it exists, otherwise
returns its playlist_title.
"""
return self.kwargs_get(SOURCE_TITLE, self.playlist_title)
@property
def source_title_sanitized(self: Self) -> str:
"""
Returns
-------
str
The source title, sanitized
"""
return sanitize_filename(self.source_title)
@property
def source_uid(self: Self) -> str:
"""
Returns
-------
str
The source unique id if it exists, otherwise returns the playlist unique ID.
"""
return self.kwargs_get(SOURCE_UID, self.playlist_uid)
@property
def source_index(self: Self) -> int:
"""
Returns
-------
int
Source index if it exists, otherwise returns the playlist index.
It is recommended to not use this unless you know the source will never add new content
(it is easy for this value to change).
"""
return self.kwargs_get(SOURCE_INDEX, self.playlist_index)
@property
def source_index_padded(self: Self) -> str:
"""
Returns
-------
str
The source index, padded.
"""
return pad(self.source_index, 2)
@property
def source_count(self: Self) -> int:
"""
Returns
-------
int
The source count if it exists, otherwise returns the playlist count.
"""
return self.kwargs_get(SOURCE_COUNT, self.playlist_count)
@property
def source_webpage_url(self: Self) -> str:
"""
Returns
-------
str
The source webpage url if it exists, otherwise returns the playlist webpage url.
"""
return self.kwargs_get(SOURCE_WEBPAGE_URL, self.playlist_webpage_url)
@property
def source_description(self: Self) -> str:
"""
Returns
-------
str
The source description if it exists, otherwise returns the playlist description.
"""
return self.kwargs_get(SOURCE_DESCRIPTION, self.playlist_description)
@property
def playlist_uid(self: Self) -> str:
"""
Returns
-------
str
The playlist unique ID if it exists, otherwise return the entry unique ID.
"""
return self.kwargs_get(PLAYLIST_UID, self.uid)
@property
def playlist_title(self: Self) -> str:
"""
Returns
-------
str
Name of its parent playlist/channel if it exists, otherwise returns its title.
"""
return self.kwargs_get(PLAYLIST_TITLE, self.title)
@property
def playlist_title_sanitized(self: Self) -> str:
"""
Returns
-------
str
The playlist name, sanitized
"""
return sanitize_filename(self.playlist_title)
@property
def playlist_index(self: Self) -> int:
"""
Returns
-------
int
Playlist index if it exists, otherwise returns ``1``.
Note that for channels/playlists, any change (i.e. adding or removing a video) will make
this value change. Use with caution.
"""
return self.kwargs_get(PLAYLIST_INDEX, 1)
@property
def playlist_index_reversed(self: Self) -> int:
"""
Returns
-------
int
Playlist index reversed via ``playlist_count - playlist_index + 1``
"""
return self.playlist_count - self.playlist_index + 1
@property
def playlist_index_padded(self: Self) -> str:
"""
Returns
-------
str
playlist_index padded two digits
"""
return pad(self.playlist_index, width=2)
@property
def playlist_index_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
playlist_index_reversed padded two digits
"""
return pad(self.playlist_index_reversed, width=2)
@property
def playlist_index_padded6(self: Self) -> str:
"""
Returns
-------
str
playlist_index padded six digits.
"""
return pad(self.playlist_index, width=6)
@property
def playlist_index_reversed_padded6(self: Self) -> str:
"""
Returns
-------
str
playlist_index_reversed padded six digits.
"""
return pad(self.playlist_index_reversed, width=6)
@property
def playlist_count(self: Self) -> int:
"""
Returns
-------
int
Playlist count if it exists, otherwise returns ``1``.
Note that for channels/playlists, any change (i.e. adding or removing a video) will make
this value change. Use with caution.
"""
return self.kwargs_get(PLAYLIST_COUNT, 1)
@property
def playlist_description(self: Self) -> str:
"""
Returns
-------
str
The playlist description if it exists, otherwise returns the entry's description.
"""
return self.kwargs_get(PLAYLIST_DESCRIPTION, self.description)
@property
def playlist_webpage_url(self: Self) -> str:
"""
Returns
-------
str
The playlist webpage url if it exists. Otherwise, returns the entry webpage url.
"""
return self.kwargs_get(PLAYLIST_WEBPAGE_URL, self.webpage_url)
@property
def playlist_max_upload_year(self: Self) -> int:
"""
Returns
-------
int
Max upload_year for all entries in this entry's playlist if it exists, otherwise returns
``upload_year``
"""
# override in EntryParent
return self.kwargs_get(PLAYLIST_MAX_UPLOAD_YEAR, self.upload_year)
@property
def playlist_max_upload_year_truncated(self: Self) -> int:
"""
Returns
-------
int
The max playlist truncated upload year for all entries in this entry's playlist if it
exists, otherwise returns ``upload_year_truncated``.
"""
return self.kwargs_get(PLAYLIST_MAX_UPLOAD_YEAR_TRUNCATED, self.upload_year_truncated)
@property
def playlist_uploader_id(self: Self) -> str:
"""
Returns
-------
str
The playlist uploader id if it exists, otherwise returns the entry uploader ID.
"""
return self.kwargs_get(PLAYLIST_UPLOADER_ID, self.uploader_id)
@property
def playlist_uploader(self: Self) -> str:
"""
Returns
-------
str
The playlist uploader if it exists, otherwise return the entry uploader.
"""
return self.kwargs_get(PLAYLIST_UPLOADER, self.uploader)
@property
def playlist_uploader_sanitized(self: Self) -> str:
"""
Returns
-------
str
The playlist uploader, sanitized.
"""
return sanitize_filename(self.playlist_uploader)
@property
def playlist_uploader_url(self: Self) -> str:
"""
Returns
-------
str
The playlist uploader url if it exists, otherwise returns the playlist webpage_url.
"""
return self.kwargs_get(PLAYLIST_UPLOADER_URL, self.playlist_webpage_url)
@property
def source_uploader_id(self: Self) -> str:
"""
Returns
-------
str
The source uploader id if it exists, otherwise returns the playlist_uploader_id
"""
return self.kwargs_get(SOURCE_UPLOADER_ID, self.playlist_uploader_id)
@property
def source_uploader(self: Self) -> str:
"""
Returns
-------
str
The source uploader if it exists, otherwise return the playlist_uploader
"""
return self.kwargs_get(SOURCE_UPLOADER, self.playlist_uploader)
@property
def source_uploader_url(self: Self) -> str:
"""
Returns
-------
str
The source uploader url if it exists, otherwise returns the source webpage_url.
"""
return self.kwargs_get(SOURCE_UPLOADER_URL, self.source_webpage_url)
@property
def creator(self: Self) -> str:
"""
Returns
-------
str
The creator name if it exists, otherwise returns the channel.
"""
return self.kwargs_get(CREATOR, self.channel)
@property
def creator_sanitized(self: Self) -> str:
"""
Returns
-------
str
The creator name, sanitized
"""
return sanitize_filename(self.creator)
@property
def channel(self: Self) -> str:
"""
Returns
-------
str
The channel name if it exists, otherwise returns the uploader.
"""
return self.kwargs_get(CHANNEL, self.uploader)
@property
def channel_sanitized(self: Self) -> str:
"""
Returns
-------
str
The channel name, sanitized.
"""
return sanitize_filename(self.channel)
@property
def channel_id(self: Self) -> str:
"""
Returns
-------
str
The channel id if it exists, otherwise returns the entry uploader ID.
"""
return self.kwargs_get(CHANNEL_ID, self.uploader_id)
@property
def ext(self: Self) -> str:
"""
Returns
-------
str
The downloaded entry's file extension
"""
return self.kwargs(EXT)
@property
def thumbnail_ext(self: Self) -> str:
"""
Returns
-------
str
The downloaded entry's thumbnail extension. Will always return 'jpg'. Until there is a
need to support other image types, we always convert to jpg.
"""
return "jpg"
@property
def download_index(self: Self) -> int:
"""
Returns
-------
int
The i'th entry downloaded. NOTE that this is fetched dynamically from the download
archive.
"""
return self.kwargs_get(DOWNLOAD_INDEX, 0) + 1
@property
def download_index_padded6(self: Self) -> str:
"""
Returns
-------
str
The download_index padded six digits
"""
return pad(self.download_index, 6)
@property
def upload_date_index(self: Self) -> int:
"""
Returns
-------
int
The i'th entry downloaded with this upload date.
"""
return self.kwargs_get(UPLOAD_DATE_INDEX, 0) + 1
@property
def upload_date_index_padded(self: Self) -> str:
"""
Returns
-------
str
The upload_date_index padded two digits
"""
return pad(self.upload_date_index, 2)
@property
def upload_date_index_reversed(self: Self) -> int:
"""
Returns
-------
int
100 - upload_date_index
"""
return 100 - self.upload_date_index
@property
def upload_date_index_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The upload_date_index_reversed padded two digits
"""
return pad(self.upload_date_index_reversed, 2)
@property
def upload_date(self: Self) -> str:
"""
Returns
-------
str
The entry's uploaded date, in YYYYMMDD format. If not present, return today's date.
"""
return self.kwargs_get(UPLOAD_DATE, datetime.now().strftime("%Y%m%d"))
@property
def upload_year(self: Self) -> int:
"""
Returns
-------
int
The entry's upload year
"""
return int(self.upload_date[:4])
@property
def upload_year_truncated(self: Self) -> int:
"""
Returns
-------
int
The last two digits of the upload year, i.e. 22 in 2022
"""
return int(str(self.upload_year)[-2:])
@property
def upload_year_truncated_reversed(self: Self) -> int:
"""
Returns
-------
int
The upload year truncated, but reversed using ``100 - {upload_year_truncated}``, i.e.
2022 returns ``100 - 22`` = ``78``
"""
return 100 - self.upload_year_truncated
@property
def upload_month_reversed(self: Self) -> int:
"""
Returns
-------
int
The upload month, but reversed using ``13 - {upload_month}``, i.e. March returns ``10``
"""
return 13 - self.upload_month
@property
def upload_month_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed upload month, but padded. i.e. November returns "02"
"""
return pad(self.upload_month_reversed)
@property
def upload_month_padded(self: Self) -> str:
"""
Returns
-------
str
The entry's upload month padded to two digits, i.e. March returns "03"
"""
return self.upload_date[4:6]
@property
def upload_day_padded(self: Self) -> str:
"""
Returns
-------
str
The entry's upload day padded to two digits, i.e. the fifth returns "05"
"""
return self.upload_date[6:8]
@property
def upload_month(self: Self) -> int:
"""
Returns
-------
int
The upload month as an integer (no padding).
"""
return int(self.upload_month_padded.lstrip("0"))
@property
def upload_day(self: Self) -> int:
"""
Returns
-------
int
The upload day as an integer (no padding).
"""
return int(self.upload_day_padded.lstrip("0"))
@property
def upload_day_reversed(self: Self) -> int:
"""
Returns
-------
int
The upload day, but reversed using ``{total_days_in_month} + 1 - {upload_day}``,
i.e. August 8th would have upload_day_reversed of ``31 + 1 - 8`` = ``24``
"""
total_days_in_month = _days_in_month[self.upload_month]
if self.upload_month == 2 and self.upload_year % 4 == 0: # leap year
total_days_in_month += 1
return total_days_in_month + 1 - self.upload_day
@property
def upload_day_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed upload day, but padded. i.e. August 30th returns "02".
"""
return pad(self.upload_day_reversed)
@property
def upload_day_of_year(self: Self) -> int:
"""
Returns
-------
int
The day of the year, i.e. February 1st returns ``32``
"""
output = sum(_days_in_month[: self.upload_month]) + self.upload_day
if self.upload_month > 2 and self.upload_year % 4 == 0:
output += 1
return output
@property
def upload_day_of_year_padded(self: Self) -> str:
"""
Returns
-------
str
The upload day of year, but padded i.e. February 1st returns "032"
"""
return pad(self.upload_day_of_year, width=3)
@property
def upload_day_of_year_reversed(self: Self) -> int:
"""
Returns
-------
int
The upload day of year, but reversed using ``{total_days_in_year} + 1 - {upload_day_of_year}``,
i.e. February 1st would have upload_day_of_year_reversed of ``365 + 1 - 32`` = ``334``
"""
total_days_in_year = 365
if self.upload_year % 4 == 0:
total_days_in_year += 1
return total_days_in_year + 1 - self.upload_day_of_year
@property
def upload_day_of_year_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed upload day of year, but padded i.e. December 31st returns "001"
"""
return pad(self.upload_day_of_year_reversed, width=3)
@property
def upload_date_standardized(self: Self) -> str:
"""
Returns
-------
str
The uploaded date formatted as YYYY-MM-DD
"""
return f"{self.upload_year}-{self.upload_month_padded}-{self.upload_day_padded}"
@property
def release_date(self: Self) -> str:
"""
Returns
-------
str
The entry's release date, in YYYYMMDD format. If not present, return the upload date.
"""
return self.kwargs_get(RELEASE_DATE, self.upload_date)
@property
def release_year(self: Self) -> int:
"""
Returns
-------
int
The entry's release year
"""
return int(self.release_date[:4])
@property
def release_year_truncated(self: Self) -> int:
"""
Returns
-------
int
The last two digits of the release year, i.e. 22 in 2022
"""
return int(str(self.release_year)[-2:])
@property
def release_year_truncated_reversed(self: Self) -> int:
"""
Returns
-------
int
The release year truncated, but reversed using ``100 - {release_year_truncated}``, i.e.
2022 returns ``100 - 22`` = ``78``
"""
return 100 - self.release_year_truncated
@property
def release_month_reversed(self: Self) -> int:
"""
Returns
-------
int
The release month, but reversed
using ``13 - {release_month}``, i.e. March returns ``10``
"""
return 13 - self.release_month
@property
def release_month_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed release month, but padded. i.e. November returns "02"
"""
return pad(self.release_month_reversed)
@property
def release_month_padded(self: Self) -> str:
"""
Returns
-------
str
The entry's release month padded to two digits, i.e. March returns "03"
"""
return self.release_date[4:6]
@property
def release_day_padded(self: Self) -> str:
"""
Returns
-------
str
The entry's release day padded to two digits, i.e. the fifth returns "05"
"""
return self.release_date[6:8]
@property
def release_month(self: Self) -> int:
"""
Returns
-------
int
The release month as an integer (no padding).
"""
return int(self.release_month_padded.lstrip("0"))
@property
def release_day(self: Self) -> int:
"""
Returns
-------
int
The release day as an integer (no padding).
"""
return int(self.release_day_padded.lstrip("0"))
@property
def release_day_reversed(self: Self) -> int:
"""
Returns
-------
int
The release day, but reversed using ``{total_days_in_month} + 1 - {release_day}``,
i.e. August 8th would have release_day_reversed of ``31 + 1 - 8`` = ``24``
"""
total_days_in_month = _days_in_month[self.release_month]
if self.release_month == 2 and self.release_year % 4 == 0: # leap year
total_days_in_month += 1
return total_days_in_month + 1 - self.release_day
@property
def release_day_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed release day, but padded. i.e. August 30th returns "02".
"""
return pad(self.release_day_reversed)
@property
def release_day_of_year(self: Self) -> int:
"""
Returns
-------
int
The day of the year, i.e. February 1st returns ``32``
"""
output = sum(_days_in_month[: self.release_month]) + self.release_day
if self.release_month > 2 and self.release_year % 4 == 0:
output += 1
return output
@property
def release_day_of_year_padded(self: Self) -> str:
"""
Returns
-------
str
The release day of year, but padded i.e. February 1st returns "032"
"""
return pad(self.release_day_of_year, width=3)
@property
def release_day_of_year_reversed(self: Self) -> int:
"""
Returns
-------
int
The release day of year, but reversed using ``{total_days_in_year} + 1 - {release_day_of_year}``,
i.e. February 1st would have release_day_of_year_reversed of ``365 + 1 - 32`` = ``334``
"""
total_days_in_year = 365
if self.release_year % 4 == 0:
total_days_in_year += 1
return total_days_in_year + 1 - self.release_day_of_year
@property
def release_day_of_year_reversed_padded(self: Self) -> str:
"""
Returns
-------
str
The reversed release day of year, but padded i.e. December 31st returns "001"
"""
return pad(self.release_day_of_year_reversed, width=3)
@property
def release_date_standardized(self: Self) -> str:
"""
Returns
-------
str
The release date formatted as YYYY-MM-DD
"""
return f"{self.release_year}-{self.release_month_padded}-{self.release_day_padded}" | ytdl-sub | /ytdl_sub-2023.9.2.post4-py3-none-any.whl/ytdl_sub/entries/variables/entry_variables.py | entry_variables.py |
from typing import List
class KwargKeys:
keys: List[str] = []
backend_keys: List[str] = []
def _(key: str, backend: bool = False) -> str:
if backend:
assert key not in KwargKeys.backend_keys
KwargKeys.backend_keys.append(key)
else:
assert key not in KwargKeys.keys
KwargKeys.keys.append(key)
return key
SOURCE_ENTRY = _("source_entry", backend=True)
SOURCE_INDEX = _("source_index")
SOURCE_COUNT = _("source_count")
SOURCE_TITLE = _("source_title")
SOURCE_UID = _("source_uid")
SOURCE_DESCRIPTION = _("source_description")
SOURCE_WEBPAGE_URL = _("source_webpage_url")
SOURCE_UPLOADER = _("source_uploader")
SOURCE_UPLOADER_ID = _("source_uploader_id")
SOURCE_UPLOADER_URL = _("source_uploader_url")
PLAYLIST_ENTRY = _("playlist_entry", backend=True)
PLAYLIST_WEBPAGE_URL = _("playlist_webpage_url")
PLAYLIST_INDEX = _("playlist_index")
PLAYLIST_COUNT = _("playlist_count")
PLAYLIST_MAX_UPLOAD_YEAR = _("playlist_max_upload_year")
PLAYLIST_MAX_UPLOAD_YEAR_TRUNCATED = _("playlist_max_upload_year_truncated")
PLAYLIST_TITLE = _("playlist_title")
PLAYLIST_DESCRIPTION = _("playlist_description")
PLAYLIST_UID = _("playlist_uid")
PLAYLIST_UPLOADER = _("playlist_uploader")
PLAYLIST_UPLOADER_ID = _("playlist_uploader_id")
PLAYLIST_UPLOADER_URL = _("playlist_uploader_url")
COLLECTION_URL = _("collection_url", backend=True)
DOWNLOAD_INDEX = _("download_index", backend=True)
UPLOAD_DATE_INDEX = _("upload_date_index", backend=True)
REQUESTED_SUBTITLES = _("requested_subtitles", backend=True)
CHAPTERS = _("chapters", backend=True)
YTDL_SUB_CUSTOM_CHAPTERS = _("ytdl_sub_custom_chapters", backend=True)
YTDL_SUB_REGEX_SOURCE_VARS = _("ytdl_sub_regex_source_vars", backend=True)
YTDL_SUB_MATCH_FILTER_REJECT = _("ytdl_sub_match_filter_reject", backend=True)
SPONSORBLOCK_CHAPTERS = _("sponsorblock_chapters", backend=True)
SPLIT_BY_CHAPTERS_PARENT_ENTRY = _("split_by_chapters_parent_entry", backend=True)
COMMENTS = _("comments", backend=True)
UID = _("id")
EXTRACTOR = _("extractor")
IE_KEY = _("ie_key")
EPOCH = _("epoch")
CHANNEL = _("channel")
CHANNEL_ID = _("channel_id")
CREATOR = _("creator")
EXT = _("ext")
TITLE = _("title")
DESCRIPTION = _("description")
WEBPAGE_URL = _("webpage_url")
RELEASE_DATE = _("release_date")
UPLOAD_DATE = _("upload_date")
UPLOADER = _("uploader")
UPLOADER_ID = _("uploader_id")
UPLOADER_URL = _("uploader_url")
# --- end of ytdl_sub/entries/variables/kwargs.py (package: ytdl-sub 2023.9.2.post4) ---
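# --- Editor's illustrative sketch for kwargs.py above (not upstream code) ---
# The `_` helper returns the key unchanged while registering it, so defining the same key
# twice fails fast with an AssertionError at import time. A standalone reproduction of that
# pattern (hypothetical names, not part of ytdl-sub):
_example_registered_keys = []


def _example_register(key: str) -> str:
    # Mirrors `_` above: refuse duplicates, then record and return the key unchanged
    assert key not in _example_registered_keys
    _example_registered_keys.append(key)
    return key


EXAMPLE_KEY = _example_register("example_key")
try:
    _example_register("example_key")  # a second registration of the same key
except AssertionError:
    pass  # duplicates are rejected, exactly as in kwargs.py
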
import abc
from abc import ABC
from typing import Dict
from typing import Generic
from typing import Iterable
from typing import List
from typing import Optional
from typing import Type
from typing import final
from ytdl_sub.config.plugin import BasePlugin
from ytdl_sub.config.plugin import Plugin
from ytdl_sub.config.preset_options import Overrides
from ytdl_sub.config.preset_options import TOptionsValidator
from ytdl_sub.downloaders.ytdl_options_builder import YTDLOptionsBuilder
from ytdl_sub.entries.entry import Entry
from ytdl_sub.ytdl_additions.enhanced_download_archive import EnhancedDownloadArchive
class SourcePluginExtension(Plugin[TOptionsValidator], ABC):
"""
Plugins that get added automatically by using a downloader. Downloader options
are the plugin options.
"""
@final
def ytdl_options(self) -> Optional[Dict]:
"""
SourcePluginExtensions are intended to run after downloading. ytdl_options at that point
are not needed.
"""
return None
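# Editor's note (not upstream code): concrete SourcePluginExtensions live alongside their
# downloader -- see UrlDownloaderThumbnailPlugin and UrlDownloaderCollectionVariablePlugin in
# downloader.py further below, which set a PluginPriority and override modify_entry() /
# modify_entry_metadata() while relying on the stubbed-out ytdl_options() above.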
class SourcePlugin(BasePlugin[TOptionsValidator], Generic[TOptionsValidator], ABC):
plugin_extensions: List[Type[SourcePluginExtension]] = []
def __init__(
self,
options: TOptionsValidator,
enhanced_download_archive: EnhancedDownloadArchive,
download_ytdl_options: YTDLOptionsBuilder,
metadata_ytdl_options: YTDLOptionsBuilder,
overrides: Overrides,
):
super().__init__(
options=options,
overrides=overrides,
enhanced_download_archive=enhanced_download_archive,
)
self._download_ytdl_options_builder = download_ytdl_options
self._metadata_ytdl_options_builder = metadata_ytdl_options
@abc.abstractmethod
def download_metadata(self) -> Iterable[Entry]:
"""Gathers metadata of all entries to download"""
@abc.abstractmethod
def download(self, entry: Entry) -> Entry:
"""The function to perform the download of all media entries"""
@final
def added_plugins(self) -> List[SourcePluginExtension]:
"""Add these plugins from the Downloader to the subscription"""
return [
plugin_extension(
options=self.plugin_options,
overrides=self.overrides,
enhanced_download_archive=self._enhanced_download_archive,
)
for plugin_extension in self.plugin_extensions
]
# --- end of ytdl_sub/downloaders/source_plugin.py (package: ytdl-sub 2023.9.2.post4) ---
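# --- Editor's illustrative sketch for source_plugin.py above (not upstream code) ---
# Shape of a SourcePlugin subclass, following the concrete downloaders later in this dump
# (InfoJsonDownloader, MultiUrlDownloader): a validator type plus the two abstract methods.
# ExampleDownloaderOptions/ExampleDownloader are hypothetical names for illustration only.
from typing import Iterable

from ytdl_sub.config.preset_options import OptionsDictValidator
from ytdl_sub.downloaders.source_plugin import SourcePlugin
from ytdl_sub.entries.entry import Entry


class ExampleDownloaderOptions(OptionsDictValidator):
    _optional_keys = {"no-op"}


class ExampleDownloader(SourcePlugin[ExampleDownloaderOptions]):
    plugin_options_type = ExampleDownloaderOptions

    def download_metadata(self) -> Iterable[Entry]:
        # Yield lightweight Entry objects describing what would be downloaded
        return []

    def download(self, entry: Entry) -> Entry:
        # Fetch the media for a single entry, then return it
        return entry
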
import contextlib
import copy
import json
import os
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
import yt_dlp as ytdl
from yt_dlp.utils import ExistingVideoReached
from yt_dlp.utils import MaxDownloadsReached
from yt_dlp.utils import RejectedVideoReached
from ytdl_sub.thread.log_entries_downloaded_listener import LogEntriesDownloadedListener
from ytdl_sub.utils.exceptions import FileNotDownloadedException
from ytdl_sub.utils.logger import Logger
class YTDLP:
_EXTRACT_ENTRY_NUM_RETRIES: int = 5
_EXTRACT_ENTRY_RETRY_WAIT_SEC: int = 5
logger = Logger.get(name="yt-dlp-downloader")
@classmethod
@contextmanager
def ytdlp_downloader(cls, ytdl_options_overrides: Dict) -> ytdl.YoutubeDL:
"""
Context manager to interact with yt_dlp.
"""
cls.logger.debug("ytdl_options: %s", str(ytdl_options_overrides))
with Logger.handle_external_logs(name="yt-dlp"):
# Deep copy ytdl_options in case yt-dlp modifies the dict
with ytdl.YoutubeDL(copy.deepcopy(ytdl_options_overrides)) as ytdl_downloader:
yield ytdl_downloader
@classmethod
def extract_info(cls, ytdl_options_overrides: Dict, **kwargs) -> Dict:
"""
Wrapper around yt_dlp.YoutubeDL.YoutubeDL.extract_info
All kwargs will be passed to the extract_info function.
Parameters
----------
ytdl_options_overrides
Optional. Dict containing ytdl args to override other predefined ytdl args
**kwargs
arguments passed directly to YoutubeDL extract_info
"""
with cls.ytdlp_downloader(ytdl_options_overrides) as ytdlp:
return ytdlp.extract_info(**kwargs)
@classmethod
def extract_info_with_retry(
cls,
ytdl_options_overrides: Dict,
is_downloaded_fn: Optional[Callable[[], bool]] = None,
is_thumbnail_downloaded_fn: Optional[Callable[[], bool]] = None,
**kwargs,
) -> Dict:
"""
Wrapper around yt_dlp.YoutubeDL.YoutubeDL.extract_info
All kwargs will be passed to the extract_info function.
This should be used when downloading a single entry. Checks if the entry's video
and thumbnail files exist - retry if they do not.
Parameters
----------
ytdl_options_overrides
Dict containing ytdl args to override other predefined ytdl args
is_downloaded_fn
Optional. Function to check if the entry is downloaded
is_thumbnail_downloaded_fn
Optional. Function to check if the entry thumbnail is downloaded
**kwargs
arguments passed directly to YoutubeDL extract_info
Raises
------
FileNotDownloadedException
If the entry fails to download
"""
num_tries = 0
copied_ytdl_options_overrides = copy.deepcopy(ytdl_options_overrides)
is_downloaded = False
entry_dict: Optional[Dict] = None
while num_tries < cls._EXTRACT_ENTRY_NUM_RETRIES:
entry_dict = cls.extract_info(
ytdl_options_overrides=copied_ytdl_options_overrides, **kwargs
)
is_downloaded = is_downloaded_fn is None or is_downloaded_fn()
is_thumbnail_downloaded = (
is_thumbnail_downloaded_fn is None or is_thumbnail_downloaded_fn()
)
if is_downloaded and is_thumbnail_downloaded:
return entry_dict
# Always add check_formats
# See https://github.com/yt-dlp/yt-dlp/issues/502
copied_ytdl_options_overrides["check_formats"] = True
# If the video file is downloaded but the thumbnail is not, then do not download
# the video again
if is_downloaded and not is_thumbnail_downloaded:
copied_ytdl_options_overrides["skip_download"] = True
copied_ytdl_options_overrides["writethumbnail"] = True
time.sleep(cls._EXTRACT_ENTRY_RETRY_WAIT_SEC)
num_tries += 1
# Remove the download archive so the retry does not think it's already downloaded,
# even though it is not
if "download_archive" in copied_ytdl_options_overrides:
del copied_ytdl_options_overrides["download_archive"]
if num_tries < cls._EXTRACT_ENTRY_NUM_RETRIES:
cls.logger.debug(
"Failed to download entry. Retrying %d / %d",
num_tries,
cls._EXTRACT_ENTRY_NUM_RETRIES,
)
# Still return if the media file was downloaded (the thumbnail could be missing)
if is_downloaded and entry_dict is not None:
return entry_dict
error_dict = {"ytdl_options": ytdl_options_overrides, "kwargs": kwargs}
raise FileNotDownloadedException(
f"yt-dlp failed to download an entry with these arguments: {error_dict}"
)
@classmethod
def _get_entry_dicts_from_info_json_files(cls, working_directory: str) -> List[Dict]:
"""
Parameters
----------
working_directory
Directory that info json files are located
Returns
-------
List of all info.json files read as JSON dicts
"""
entry_dicts: List[Dict] = []
info_json_paths = [
Path(working_directory) / file_name
for file_name in os.listdir(working_directory)
if file_name.endswith(".info.json")
]
for info_json_path in info_json_paths:
with open(info_json_path, "r", encoding="utf-8") as file:
entry_dicts.append(json.load(file))
return entry_dicts
@classmethod
@contextlib.contextmanager
def _listen_and_log_downloaded_info_json(
cls, working_directory: str, log_prefix: Optional[str]
):
"""
Context manager that starts a separate thread that listens for new .info.json files,
and prints their titles as they appear
"""
if not log_prefix:
yield
return
info_json_listener = LogEntriesDownloadedListener(
working_directory=working_directory,
log_prefix=log_prefix,
)
info_json_listener.start()
try:
yield
finally:
info_json_listener.complete = True
@classmethod
def extract_info_via_info_json(
cls,
working_directory: str,
ytdl_options_overrides: Dict,
log_prefix_on_info_json_dl: Optional[str] = None,
**kwargs,
) -> List[Dict]:
"""
Wrapper around yt_dlp.YoutubeDL.YoutubeDL.extract_info with infojson enabled. Entry dicts
are extracted via reading all info.json files in the working directory rather than
from the output of extract_info.
This allows us to catch RejectedVideoReached and ExistingVideoReached exceptions, and
simply ignore them while still being able to read downloaded entry metadata.
Parameters
----------
working_directory
Directory that info json files reside in
ytdl_options_overrides
Dict containing ytdl args to override other predefined ytdl args
log_prefix_on_info_json_dl
Optional. Spin a new thread to listen for new info.json files. Log
f'{log_prefix_on_info_json_dl} {title}' when a new one appears
**kwargs
arguments passed directly to YoutubeDL extract_info
"""
try:
with cls._listen_and_log_downloaded_info_json(
working_directory=working_directory, log_prefix=log_prefix_on_info_json_dl
):
_ = cls.extract_info(ytdl_options_overrides=ytdl_options_overrides, **kwargs)
except RejectedVideoReached:
cls.logger.debug(
"RejectedVideoReached, stopping additional downloads "
"(Can be disable by setting `ytdl_options.break_on_reject` to False)."
)
except ExistingVideoReached:
cls.logger.debug(
"ExistingVideoReached, stopping additional downloads. "
"(Can be disable by setting `ytdl_options.break_on_existing` to False)."
)
except MaxDownloadsReached:
cls.logger.info("MaxDownloadsReached, stopping additional downloads.")
return cls._get_entry_dicts_from_info_json_files(working_directory=working_directory)
# --- end of ytdl_sub/downloaders/ytdlp.py (package: ytdl-sub 2023.9.2.post4) ---
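# --- Editor's illustrative sketch for ytdlp.py above (not upstream code) ---
# Minimal usage of the YTDLP wrapper, under the assumption that extra kwargs are forwarded
# verbatim to yt_dlp.YoutubeDL.extract_info (as the docstrings above state). The URL and
# option values are placeholders for illustration only.
if __name__ == "__main__":
    info = YTDLP.extract_info(
        ytdl_options_overrides={"skip_download": True},
        url="https://www.youtube.com/watch?v=dQw4w9WgXcQ",
    )
    print(info.get("title"), info.get("upload_date"))
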
import copy
import json
from pathlib import Path
from typing import Dict
from typing import Iterable
from typing import List
from ytdl_sub.config.preset_options import OptionsDictValidator
from ytdl_sub.config.preset_options import Overrides
from ytdl_sub.downloaders.source_plugin import SourcePlugin
from ytdl_sub.downloaders.ytdl_options_builder import YTDLOptionsBuilder
from ytdl_sub.entries.entry import Entry
from ytdl_sub.utils.exceptions import ValidationException
from ytdl_sub.utils.file_handler import FileHandler
from ytdl_sub.utils.file_handler import get_file_extension
from ytdl_sub.ytdl_additions.enhanced_download_archive import DownloadMapping
from ytdl_sub.ytdl_additions.enhanced_download_archive import EnhancedDownloadArchive
class InfoJsonDownloaderOptions(OptionsDictValidator):
_optional_keys = {"no-op"}
class InfoJsonDownloader(SourcePlugin[InfoJsonDownloaderOptions]):
plugin_options_type = InfoJsonDownloaderOptions
def __init__(
self,
options: InfoJsonDownloaderOptions,
enhanced_download_archive: EnhancedDownloadArchive,
download_ytdl_options: YTDLOptionsBuilder,
metadata_ytdl_options: YTDLOptionsBuilder,
overrides: Overrides,
):
super().__init__(
options=options,
enhanced_download_archive=enhanced_download_archive,
download_ytdl_options=download_ytdl_options,
metadata_ytdl_options=metadata_ytdl_options,
overrides=overrides,
)
# Keep track of original file mappings for the 'mock' download
self._original_entry_mappings = copy.deepcopy(
enhanced_download_archive.mapping.entry_mappings
)
@property
def output_directory(self) -> str:
"""
Returns
-------
The output directory
"""
return self._enhanced_download_archive.output_directory
@property
def _entry_mappings(self) -> Dict[str, DownloadMapping]:
"""
Returns
-------
The up-to-date entry mappings
"""
return self._enhanced_download_archive.mapping.entry_mappings
def _get_entry_from_download_mapping(self, download_mapping: DownloadMapping):
"""
Try to load an entry from a download mapping's info json
"""
for file_name in download_mapping.file_names:
if file_name.endswith(".info.json"):
try:
with open(
Path(self.output_directory) / file_name, "r", encoding="utf-8"
) as maybe_info_json:
entry_dict = json.load(maybe_info_json)
except Exception as exc:
raise ValidationException(
"info.json file cannot be loaded - subscription cannot be reformatted"
) from exc
return Entry(
entry_dict=entry_dict,
working_directory=self.working_directory,
)
raise ValidationException(
"info.json file could not be found - subscription cannot be reformatted"
)
def download_metadata(self) -> Iterable[Entry]:
"""
Loads all entries via their info.json files first (to ensure they are all valid), then
iterates them
"""
entries: List[Entry] = []
for download_mapping in self._enhanced_download_archive.mapping.entry_mappings.values():
entry = self._get_entry_from_download_mapping(download_mapping)
entries.append(entry)
# Remove each entry from the live download archive since it will get re-added
# unless it is filtered
for entry in entries:
self._enhanced_download_archive.mapping.remove_entry(entry.uid)
for entry in sorted(entries, key=lambda ent: ent.download_index):
yield entry
# If the original entry file_path is no longer maintained in the new mapping, then
# delete it
num_original_files_deleted: int = 0
for file_name in self._original_entry_mappings[entry.uid].file_names:
if (
entry.uid not in self._entry_mappings
or file_name not in self._entry_mappings[entry.uid].file_names
):
num_original_files_deleted += 1
self._enhanced_download_archive.delete_file_from_output_directory(file_name)
# If all original entry files are deleted, mark it as deleted
if num_original_files_deleted == len(
self._original_entry_mappings[entry.uid].file_names
):
self._enhanced_download_archive.num_entries_removed += 1
def download(self, entry: Entry) -> Entry:
"""
Mock the download by copying the entry file from the output directory into
the working directory
"""
# Use original mapping since the live mapping gets wiped
entry_file_names = self._original_entry_mappings[entry.uid].file_names
for file_name in entry_file_names:
ext = get_file_extension(file_name)
file_path = Path(self.output_directory) / file_name
working_directory_file_path = Path(self.working_directory) / f"{entry.uid}.{ext}"
# NFO files will always get rewritten, so ignore
if ext == "nfo":
continue
if not self.is_dry_run:
FileHandler.copy(
src_file_path=file_path,
dst_file_path=working_directory_file_path,
)
return entry
# --- end of ytdl_sub/downloaders/info_json/info_json_downloader.py (package: ytdl-sub 2023.9.2.post4) ---
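# --- Editor's note for info_json_downloader.py above (not upstream code) ---
# Call pattern, as implied by the code above: download_metadata() replays every previously
# archived entry from its saved .info.json file, download() then "re-downloads" the entry by
# copying its archived files back into the working directory under "{uid}.{ext}", and any
# original file no longer referenced by the new mapping is deleted from the output directory.
# This is what allows reformatting an existing subscription without re-fetching any media.
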
import copy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from ytdl_sub.config.preset_options import OptionsValidator
from ytdl_sub.validators.strict_dict_validator import StrictDictValidator
from ytdl_sub.validators.string_formatter_validators import DictFormatterValidator
from ytdl_sub.validators.string_formatter_validators import OverridesStringFormatterValidator
from ytdl_sub.validators.string_formatter_validators import StringFormatterValidator
from ytdl_sub.validators.validators import BoolValidator
from ytdl_sub.validators.validators import ListValidator
class UrlThumbnailValidator(StrictDictValidator):
_required_keys = {"name", "uid"}
def __init__(self, name, value):
super().__init__(name, value)
self._name = self._validate_key(key="name", validator=StringFormatterValidator)
self._uid = self._validate_key(key="uid", validator=OverridesStringFormatterValidator)
@property
def name(self) -> StringFormatterValidator:
"""
File name for the thumbnail
"""
return self._name
@property
def uid(self) -> OverridesStringFormatterValidator:
"""
yt-dlp's unique ID of the thumbnail
"""
return self._uid
class UrlThumbnailListValidator(ListValidator[UrlThumbnailValidator]):
_inner_list_type = UrlThumbnailValidator
class UrlValidator(StrictDictValidator):
_required_keys = {"url"}
_optional_keys = {"variables", "source_thumbnails", "playlist_thumbnails", "download_reverse"}
@classmethod
def partial_validate(cls, name: str, value: Any) -> None:
"""
Partially validate a collection url
"""
if isinstance(value, dict):
value["url"] = value.get("url", "placeholder")
_ = cls(name, value)
def __init__(self, name, value):
super().__init__(name, value)
# TODO: url validate using yt-dlp IE
self._url = self._validate_key(key="url", validator=OverridesStringFormatterValidator)
self._variables = self._validate_key_if_present(
key="variables", validator=DictFormatterValidator, default={}
)
self._source_thumbnails = self._validate_key_if_present(
key="source_thumbnails", validator=UrlThumbnailListValidator, default=[]
)
self._playlist_thumbnails = self._validate_key_if_present(
key="playlist_thumbnails", validator=UrlThumbnailListValidator, default=[]
)
self._download_reverse = self._validate_key(
key="download_reverse", validator=BoolValidator, default=True
)
@property
def url(self) -> OverridesStringFormatterValidator:
"""
Required. URL to download from.
"""
return self._url
@property
def variables(self) -> DictFormatterValidator:
"""
Source variables to add to each entry. The top-most collection must define all possible
variables. Collections below can redefine all of them or a subset of the top-most variables.
"""
return self._variables
@property
def playlist_thumbnails(self) -> Optional[UrlThumbnailListValidator]:
"""
Thumbnails to download from the playlist, if any exist. Playlist is the ``yt-dlp`` naming
convention for a set that contains multiple entries. For example, the URL
``https://www.youtube.com/c/RickastleyCoUkOfficial`` would have ``playlist`` refer to the
channel.
Usage:
.. code-block:: yaml
playlist_thumbnails:
- name: "poster.jpg"
uid: "avatar_uncropped"
- name: "fanart.jpg"
uid: "banner_uncropped"
``name`` is the file name relative to the output directory to store the thumbnail.
``uid`` is the yt-dlp thumbnail ID. Can specify ``latest_entry`` to use the latest entry's
thumbnail.
"""
return self._playlist_thumbnails
@property
def source_thumbnails(self) -> Optional[UrlThumbnailListValidator]:
"""
Thumbnails to download from the source, if any exist. Source in this context refers to the
set of sets. For example, the URL
``https://www.youtube.com/c/RickastleyCoUkOfficial/playlists``
would have ``playlist`` refer to each individual playlist, whereas ``source`` refers
to the channel.
Usage:
.. code-block:: yaml
source_thumbnails:
- name: "poster.jpg"
uid: "avatar_uncropped"
- name: "fanart.jpg"
uid: "banner_uncropped"
``name`` is the file name relative to the output directory to store the thumbnail.
``uid`` is the yt-dlp thumbnail ID. Can specify ``latest_entry`` to use the latest entry's
thumbnail.
"""
return self._source_thumbnails
@property
def download_reverse(self) -> bool:
"""
Optional. Whether to download entries in the reverse order of the metadata downloaded.
Defaults to True.
"""
return self._download_reverse.value
class UrlStringOrDictValidator(UrlValidator):
"""
URL validator that supports a single string like:
download:
- "https://"
or
download:
- url: "https://"
"""
_expected_value_type = (dict, str)
def __init__(self, name, value):
super().__init__(name, {"url": value} if isinstance(value, str) else value)
class UrlListValidator(ListValidator[UrlStringOrDictValidator]):
_inner_list_type = UrlStringOrDictValidator
_expected_value_type_name = "collection url list"
def __init__(self, name, value):
super().__init__(name, value)
added_variables: Dict[str, str] = self.list[0].variables.dict_with_format_strings
for idx, url_validator in enumerate(self.list[1:]):
collection_variables = url_validator.variables.dict_with_format_strings
# see if this collection contains new added vars (it should not)
for var in collection_variables.keys():
if var not in added_variables:
raise self._validation_exception(
f"Collection url {idx} contains the variable '{var}' that the first "
f"collection url does not. The first collection url must define all added "
f"variables."
)
# see if this collection is missing any added vars (if so, inherit from the top)
for var in added_variables.keys():
if var not in collection_variables.keys():
collection_variables[var] = added_variables[var]
class MultiUrlValidator(OptionsValidator):
"""
Downloads from multiple URLs. If an entry is returned from more than one URL, it will
resolve to the bottom-most URL settings.
"""
@classmethod
def partial_validate(cls, name: str, value: Any) -> None:
"""
Partially validate a collection
"""
if isinstance(value, dict):
value["url"] = value.get("url", "sadfasdf")
_ = cls(name, value)
def __init__(self, name, value):
super().__init__(name, value)
# Copy since we're popping things
value_copy = copy.deepcopy(value)
if isinstance(value, dict):
# Pop old required field in case it's still there
value_copy.pop("download_strategy", None)
# Deal with old multi-url download strategy
if isinstance(value, dict) and "urls" in value_copy:
self._urls = UrlListValidator(name=name, value=value_copy["urls"])
else:
self._urls = UrlListValidator(name=name, value=value_copy)
@property
def urls(self) -> UrlListValidator:
"""
Required. A list of :ref:`url` with the addition of the ``variables`` attribute.
Multiple URLs should be listed in the order of priority, with the lowest priority being the
top-most, and highest priority being the bottom-most. If a download exists in more than
one URL, it will resolve to the bottom-most one (the highest priority) and
inherit those variables.
"""
return self._urls
@property
def variables(self) -> DictFormatterValidator:
"""
Optional. Source variables to add to each entry downloaded from its respective :ref:`url`.
The top-most :ref:`url` must define all possible variables. Other :ref:`url` entries can
redefine all of them or a subset of the top-most variables.
"""
# keep for readthedocs documentation
return self._urls.list[0].variables
def added_source_variables(self) -> List[str]:
"""
Returns
-------
List of variables added. The first collection url always contains all the variables.
"""
return list(self._urls.list[0].variables.keys)
def validate_with_variables(
self, source_variables: List[str], override_variables: Dict[str, str]
) -> None:
"""
Ensures new variables added are not existing variables
"""
for source_var_name in self.added_source_variables():
if source_var_name in source_variables:
raise self._validation_exception(
f"'{source_var_name}' cannot be used as a variable name because it "
f"is an existing source variable"
)
base_variables = dict(
override_variables, **{source_var: "dummy_string" for source_var in source_variables}
)
# Apply formatting to each new source variable, ensure it resolves
for collection_url in self.urls.list:
for (
source_var_name,
source_var_formatter_str,
) in collection_url.variables.dict_with_format_strings.items():
_ = StringFormatterValidator(
name=f"{self._name}.{source_var_name}", value=source_var_formatter_str
).apply_formatter(base_variables)
# Ensure at least one URL is non-empty
has_non_empty_url = False
for url_validator in self.urls.list:
has_non_empty_url |= bool(url_validator.url.apply_formatter(override_variables))
if not has_non_empty_url:
raise self._validation_exception("Must contain at least one url that is non-empty") | ytdl-sub | /ytdl_sub-2023.9.2.post4-py3-none-any.whl/ytdl_sub/downloaders/url/validators.py | validators.py |
import contextlib
import os
from pathlib import Path
from typing import Dict
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
from yt_dlp.utils import RejectedVideoReached
from ytdl_sub.config.plugin import PluginPriority
from ytdl_sub.config.preset_options import Overrides
from ytdl_sub.downloaders.source_plugin import SourcePlugin
from ytdl_sub.downloaders.source_plugin import SourcePluginExtension
from ytdl_sub.downloaders.url.validators import MultiUrlValidator
from ytdl_sub.downloaders.url.validators import UrlThumbnailListValidator
from ytdl_sub.downloaders.url.validators import UrlValidator
from ytdl_sub.downloaders.ytdl_options_builder import YTDLOptionsBuilder
from ytdl_sub.downloaders.ytdlp import YTDLP
from ytdl_sub.entries.entry import Entry
from ytdl_sub.entries.entry_parent import EntryParent
from ytdl_sub.entries.variables.kwargs import COLLECTION_URL
from ytdl_sub.entries.variables.kwargs import COMMENTS
from ytdl_sub.entries.variables.kwargs import DOWNLOAD_INDEX
from ytdl_sub.entries.variables.kwargs import PLAYLIST_ENTRY
from ytdl_sub.entries.variables.kwargs import REQUESTED_SUBTITLES
from ytdl_sub.entries.variables.kwargs import SOURCE_ENTRY
from ytdl_sub.entries.variables.kwargs import SPONSORBLOCK_CHAPTERS
from ytdl_sub.entries.variables.kwargs import UPLOAD_DATE_INDEX
from ytdl_sub.entries.variables.kwargs import YTDL_SUB_MATCH_FILTER_REJECT
from ytdl_sub.utils.file_handler import FileHandler
from ytdl_sub.utils.logger import Logger
from ytdl_sub.utils.thumbnail import ThumbnailTypes
from ytdl_sub.utils.thumbnail import download_and_convert_url_thumbnail
from ytdl_sub.utils.thumbnail import try_convert_download_thumbnail
from ytdl_sub.ytdl_additions.enhanced_download_archive import EnhancedDownloadArchive
download_logger = Logger.get(name="downloader")
class URLDownloadState:
def __init__(self, entries_total: int):
self.entries_total = entries_total
self.entries_downloaded = 0
class UrlDownloaderThumbnailPlugin(SourcePluginExtension):
priority = PluginPriority(modify_entry=0)
def __init__(
self,
options: MultiUrlValidator,
overrides: Overrides,
enhanced_download_archive: EnhancedDownloadArchive,
):
super().__init__(
options=options,
overrides=overrides,
enhanced_download_archive=enhanced_download_archive,
)
self._thumbnails_downloaded: Set[str] = set()
self._collection_url_mapping: Dict[str, UrlValidator] = {
self.overrides.apply_formatter(collection_url.url): collection_url
for collection_url in options.urls.list
}
def _download_parent_thumbnails(
self,
thumbnail_list_info: UrlThumbnailListValidator,
entry: Entry,
parent: EntryParent,
) -> None:
"""
Downloads and moves channel avatar and banner images to the output directory.
"""
for thumbnail_info in thumbnail_list_info.list:
thumbnail_name = self.overrides.apply_formatter(thumbnail_info.name, entry=entry)
thumbnail_id = self.overrides.apply_formatter(thumbnail_info.uid)
# If latest entry, always update the thumbnail on each entry
if thumbnail_id == ThumbnailTypes.LATEST_ENTRY:
# always save in dry-run even if it doesn't exist...
if self.is_dry_run or entry.is_thumbnail_downloaded():
self.save_file(
file_name=entry.get_download_thumbnail_name(),
output_file_name=thumbnail_name,
copy_file=True,
)
self._thumbnails_downloaded.add(thumbnail_name)
continue
# If not latest entry and the thumbnail has already been downloaded, then skip
if thumbnail_name in self._thumbnails_downloaded:
continue
if (thumbnail_url := parent.get_thumbnail_url(thumbnail_id=thumbnail_id)) is None:
download_logger.debug("Failed to find thumbnail id '%s'", thumbnail_id)
continue
if download_and_convert_url_thumbnail(
thumbnail_url=thumbnail_url,
output_thumbnail_path=str(Path(self.working_directory) / thumbnail_name),
):
self.save_file(file_name=thumbnail_name)
self._thumbnails_downloaded.add(thumbnail_name)
else:
download_logger.debug("Failed to download thumbnail id '%s'", thumbnail_id)
def _download_url_thumbnails(self, collection_url: UrlValidator, entry: Entry):
"""
After all media entries have been downloaded, post processed, and moved to the output
directory, run this function. This lets the downloader add any extra files directly to the
output directory, for things like YT channel image, banner.
"""
if entry.kwargs_contains(PLAYLIST_ENTRY):
self._download_parent_thumbnails(
thumbnail_list_info=collection_url.playlist_thumbnails,
entry=entry,
parent=EntryParent(
entry.kwargs(PLAYLIST_ENTRY), working_directory=self.working_directory
),
)
if entry.kwargs_contains(SOURCE_ENTRY):
self._download_parent_thumbnails(
thumbnail_list_info=collection_url.source_thumbnails,
entry=entry,
parent=EntryParent(
entry.kwargs(SOURCE_ENTRY), working_directory=self.working_directory
),
)
def modify_entry(self, entry: Entry) -> Optional[Entry]:
"""
Use the entry to download thumbnails (or move if LATEST_ENTRY).
In addition, convert the entry thumbnail to jpg
"""
# We always convert entry thumbnails to jpgs; this is performed here so it happens
# as early as possible in the plugin pipeline (downstream plugins depend on it being jpg)
if not self.is_dry_run:
try_convert_download_thumbnail(entry=entry)
if entry.kwargs_get(COLLECTION_URL) in self._collection_url_mapping:
self._download_url_thumbnails(
collection_url=self._collection_url_mapping[entry.kwargs(COLLECTION_URL)],
entry=entry,
)
return entry
class UrlDownloaderCollectionVariablePlugin(SourcePluginExtension):
priority = PluginPriority(modify_entry_metadata=0)
def __init__(
self,
options: MultiUrlValidator,
overrides: Overrides,
enhanced_download_archive: EnhancedDownloadArchive,
):
super().__init__(
options=options,
overrides=overrides,
enhanced_download_archive=enhanced_download_archive,
)
self._thumbnails_downloaded: Set[str] = set()
self._collection_url_mapping: Dict[str, UrlValidator] = {
self.overrides.apply_formatter(collection_url.url): collection_url
for collection_url in options.urls.list
}
def modify_entry_metadata(self, entry: Entry) -> Optional[Entry]:
"""
Add collection variables to the entry
"""
# COLLECTION_URL is a recent variable that may not exist for old entries when updating.
# Try to use source_webpage_url if it does not exist
entry_collection_url = entry.kwargs_get(COLLECTION_URL, entry.source_webpage_url)
# If the collection URL cannot find its mapping, use the last URL
collection_url = (
self._collection_url_mapping.get(entry_collection_url)
or list(self._collection_url_mapping.values())[-1]
)
entry.add_variables(variables_to_add=collection_url.variables.dict_with_format_strings)
return entry
class MultiUrlDownloader(SourcePlugin[MultiUrlValidator]):
"""
Class that interacts with ytdl to perform the download of metadata and content,
and should translate that to list of Entry objects.
"""
plugin_options_type = MultiUrlValidator
plugin_extensions = [UrlDownloaderThumbnailPlugin, UrlDownloaderCollectionVariablePlugin]
@classmethod
def ytdl_option_defaults(cls) -> Dict:
"""
.. code-block:: yaml
ytdl_options:
ignoreerrors: True # ignore errors like hidden videos, age restriction, etc
"""
return {"ignoreerrors": True}
def __init__(
self,
options: MultiUrlValidator,
enhanced_download_archive: EnhancedDownloadArchive,
download_ytdl_options: YTDLOptionsBuilder,
metadata_ytdl_options: YTDLOptionsBuilder,
overrides: Overrides,
):
"""
Parameters
----------
options
Options validator for this downloader
enhanced_download_archive
Download archive
download_ytdl_options
YTDL options builder for downloading media
metadata_ytdl_options
YTDL options builder for downloading metadata
overrides
Override variables
"""
super().__init__(
options=options,
enhanced_download_archive=enhanced_download_archive,
download_ytdl_options=download_ytdl_options,
metadata_ytdl_options=metadata_ytdl_options,
overrides=overrides,
)
self._downloaded_entries: Set[str] = set()
self._url_state: Optional[URLDownloadState] = None
@property
def download_ytdl_options(self) -> Dict:
"""
Returns
-------
YTDL options dict for downloading
"""
return (
self._download_ytdl_options_builder.clone()
.add(self.ytdl_option_defaults(), before=True)
.to_dict()
)
@property
def metadata_ytdl_options(self) -> Dict:
"""
Returns
-------
YTDL options dict for fetching metadata
"""
return (
self._metadata_ytdl_options_builder.clone()
.add(self.ytdl_option_defaults(), before=True)
.to_dict()
)
@property
def is_dry_run(self) -> bool:
"""
Returns
-------
True if dry-run is enabled. False otherwise.
"""
return self.download_ytdl_options.get("skip_download", False)
@property
def is_entry_thumbnails_enabled(self) -> bool:
"""
Returns
-------
True if entry thumbnails should be downloaded. False otherwise.
"""
return self.download_ytdl_options.get("writethumbnail", False)
###############################################################################################
# DOWNLOAD FUNCTIONS
def _is_downloaded(self, entry: Entry) -> bool:
return entry.ytdl_uid() in self._downloaded_entries
def _mark_downloaded(self, entry: Entry) -> None:
self._downloaded_entries.add(entry.ytdl_uid())
@property
def collection(self) -> MultiUrlValidator:
"""Return the download options collection"""
return self.plugin_options
@contextlib.contextmanager
def _separate_download_archives(self, clear_info_json_files: bool = False):
"""
Separate download archive writing between collection urls. This is so break_on_existing
does not break when downloading from subset urls.
Parameters
----------
clear_info_json_files
Whether to delete info.json files after yield
"""
archive_path = self.download_ytdl_options.get("download_archive", "")
backup_archive_path = f"{archive_path}.backup"
# If the archive path exists, then maintaining a download archive is enabled
archive_file_exists = bool(archive_path) and os.path.isfile(archive_path)
if archive_file_exists:
# If a backup exists, it's the one prior to any downloading, use that.
if os.path.isfile(backup_archive_path):
FileHandler.copy(src_file_path=backup_archive_path, dst_file_path=archive_path)
# If not, create the backup
else:
FileHandler.copy(src_file_path=archive_path, dst_file_path=backup_archive_path)
yield
# If an archive path did not exist at first, but now exists, delete it
if not archive_file_exists and os.path.isfile(archive_path):
FileHandler.delete(file_path=archive_path)
# If the archive file did exist, restore the backup
elif archive_file_exists:
FileHandler.move(src_file_path=backup_archive_path, dst_file_path=archive_path)
# Clear info json files if true
if clear_info_json_files:
info_json_files = [
Path(self.working_directory) / path
for path in os.listdir(self.working_directory)
if path.endswith(".info.json")
]
for info_json_file in info_json_files:
FileHandler.delete(info_json_file)
def _extract_entry_info_with_retry(self, entry: Entry) -> Entry:
download_entry_dict = YTDLP.extract_info_with_retry(
ytdl_options_overrides=self.download_ytdl_options,
is_downloaded_fn=None if self.is_dry_run else entry.is_downloaded,
is_thumbnail_downloaded_fn=None
if (self.is_dry_run or not self.is_entry_thumbnails_enabled)
else entry.is_thumbnail_downloaded_via_ytdlp,
url=entry.webpage_url,
)
return Entry(download_entry_dict, working_directory=self.working_directory)
def _iterate_child_entries(
self, url_validator: UrlValidator, entries: List[Entry]
) -> Iterator[Entry]:
entries_to_iterate = entries
if url_validator.download_reverse:
entries_to_iterate = reversed(entries)
for entry in entries_to_iterate:
self._url_state.entries_downloaded += 1
if self._is_downloaded(entry):
download_logger.info(
"Already downloaded entry %d/%d: %s",
self._url_state.entries_downloaded,
self._url_state.entries_total,
entry.title,
)
continue
yield entry
self._mark_downloaded(entry)
def _iterate_parent_entry(
self, url_validator: UrlValidator, parent: EntryParent
) -> Iterator[Entry]:
for entry_child in self._iterate_child_entries(
url_validator=url_validator, entries=parent.entry_children()
):
yield entry_child
# Recurse into the parent's parent entries
for parent_child in reversed(parent.parent_children()):
for entry_child in self._iterate_parent_entry(
url_validator=url_validator, parent=parent_child
):
yield entry_child
def _download_url_metadata(self, url: str) -> Tuple[List[EntryParent], List[Entry]]:
"""
Downloads only info.json files and forms EntryParent trees
"""
with self._separate_download_archives():
entry_dicts = YTDLP.extract_info_via_info_json(
working_directory=self.working_directory,
ytdl_options_overrides=self.metadata_ytdl_options,
log_prefix_on_info_json_dl="Downloading metadata for",
url=url,
)
parents = EntryParent.from_entry_dicts(
url=url,
entry_dicts=entry_dicts,
working_directory=self.working_directory,
)
orphans = EntryParent.from_entry_dicts_with_no_parents(
parents=parents, entry_dicts=entry_dicts, working_directory=self.working_directory
)
return parents, orphans
def _iterate_entries(
self,
url_validator: UrlValidator,
parents: List[EntryParent],
orphans: List[Entry],
) -> Iterator[Entry]:
"""
Downloads the leaf entries from EntryParent trees
"""
# Delete info json files afterwards so other collection URLs do not use them
with self._separate_download_archives(clear_info_json_files=True):
for parent in parents:
for entry_child in self._iterate_parent_entry(
url_validator=url_validator, parent=parent
):
yield entry_child
for orphan in self._iterate_child_entries(url_validator=url_validator, entries=orphans):
yield orphan
    def download_metadata(self) -> Iterable[Entry]:
        """Downloads metadata (info.json files) for all media entries and yields each entry"""
# download the bottom-most urls first since they are top-priority
for collection_url in reversed(self.collection.urls.list):
# URLs can be empty. If they are, then skip
if not (url := self.overrides.apply_formatter(collection_url.url)):
continue
parents, orphan_entries = self._download_url_metadata(url=url)
# TODO: Encapsulate this logic into its own class
self._url_state = URLDownloadState(
entries_total=sum(parent.num_children() for parent in parents) + len(orphan_entries)
)
download_logger.info(
"Beginning downloads for %s", self.overrides.apply_formatter(collection_url.url)
)
for entry in self._iterate_entries(
url_validator=collection_url, parents=parents, orphans=orphan_entries
):
# Add the collection URL to the info_dict to trace where it came from
entry.add_kwargs(
{COLLECTION_URL: self.overrides.apply_formatter(collection_url.url)}
)
yield entry
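    # Illustrative call-pattern sketch (not part of this module; the exact orchestration
    # lives in the calling subscription/plugin layer): metadata entries are yielded first,
    # and each one is then passed to download().
    #
    #     for entry in downloader.download_metadata():
    #         downloaded_entry = downloader.download(entry)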
def download(self, entry: Entry) -> Entry:
"""
Parameters
----------
entry
Entry to download
Returns
-------
The entry that was downloaded successfully
Raises
------
RejectedVideoReached
If a video was rejected and was not from match_filter
"""
download_logger.info(
"Downloading entry %d/%d: %s",
self._url_state.entries_downloaded,
self._url_state.entries_total,
entry.title,
)
# Match-filters are applied at the download stage (not metadata stage).
# If the download is rejected, and match_filter is present in the ytdl options,
# then filter downstream in the match_filter plugin
try:
download_entry = self._extract_entry_info_with_retry(entry=entry)
except RejectedVideoReached:
if "match_filter" in self.download_ytdl_options:
entry.add_kwargs({YTDL_SUB_MATCH_FILTER_REJECT: True})
return entry
raise
upload_date_idx = self._enhanced_download_archive.mapping.get_num_entries_with_upload_date(
upload_date_standardized=entry.upload_date_standardized
)
download_idx = self._enhanced_download_archive.num_entries
entry.add_kwargs(
{
# Subtitles are not downloaded in metadata run, only here, so move over
REQUESTED_SUBTITLES: download_entry.kwargs_get(REQUESTED_SUBTITLES),
# Same with sponsorblock chapters
SPONSORBLOCK_CHAPTERS: download_entry.kwargs_get(SPONSORBLOCK_CHAPTERS),
COMMENTS: download_entry.kwargs_get(COMMENTS),
# Tracks number of entries downloaded
DOWNLOAD_INDEX: download_idx,
# Tracks number of entries with the same upload date to make them unique
UPLOAD_DATE_INDEX: upload_date_idx,
}
)
        return entry
# ---- end of ytdl_sub/downloaders/url/downloader.py ----
import re
from typing import List
from typing import Optional
from ytdl_sub.validators.validators import ListValidator
from ytdl_sub.validators.validators import StringValidator
class RegexValidator(StringValidator):
_expected_value_type_name = "regex"
def __init__(self, name, value):
super().__init__(name, value)
try:
self._compiled_regex = re.compile(self.value)
except Exception as exc:
raise self._validation_exception(
error_message=f"invalid regex: '{self.value}'"
) from exc
@property
def num_capture_groups(self) -> int:
"""
Returns
-------
Number of capture groups in the regex
"""
return self._compiled_regex.groups
@property
def compiled_regex(self) -> re.Pattern:
"""
Returns
-------
The regex compiled
"""
return self._compiled_regex
def match(self, input_str: str) -> Optional[List[str]]:
"""
Parameters
----------
input_str
String to regex match
Returns
-------
        List of captures. If the regex has no capture groups, then the list will be empty.
None is returned if the input_str failed to match
"""
if match := self._compiled_regex.search(input_str):
return list(match.groups())
return None
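# Usage sketch for RegexValidator (illustrative only; the pattern below is a hypothetical example):
#
#     validator = RegexValidator(name="example", value=r"S(\d+)E(\d+)")
#     validator.num_capture_groups      # -> 2
#     validator.match("Show S01E02")    # -> ["01", "02"]
#     validator.match("no episode")     # -> None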
class RegexListValidator(ListValidator[RegexValidator]):
_expected_value_type_name = "regex list"
_inner_list_type = RegexValidator
def __init__(self, name, value):
super().__init__(name, value)
if len(set(reg.num_capture_groups for reg in self._list)) > 1:
raise self._validation_exception(
"each regex in a list must have the same number of capture groups"
)
self._num_capture_groups = self._list[0].num_capture_groups
@property
def num_capture_groups(self) -> int:
"""
Returns
-------
Number of capture groups. All regexes in the list will have the same number.
"""
return self._num_capture_groups
def match_any(self, input_str: str) -> Optional[List[str]]:
"""
Parameters
----------
input_str
String to try to regex capture from any of the regexes in the list
Returns
-------
List of captures on the first regex that matches. If the regex has no capture groups, then
        the list will be empty. None is returned if the input_str failed to match
"""
for reg in self._list:
if (maybe_capture := reg.match(input_str)) is not None:
return maybe_capture
        return None
# ---- end of ytdl_sub/validators/regex_validator.py ----
import re
from collections import OrderedDict
from keyword import iskeyword
from typing import Dict
from typing import List
from typing import final
from yt_dlp.utils import sanitize_filename
from ytdl_sub.utils.exceptions import InvalidVariableNameException
from ytdl_sub.utils.exceptions import StringFormattingException
from ytdl_sub.utils.exceptions import StringFormattingVariableNotFoundException
from ytdl_sub.validators.validators import ListValidator
from ytdl_sub.validators.validators import LiteralDictValidator
from ytdl_sub.validators.validators import Validator
_fields_validator = re.compile(r"{([a-z][a-z0-9_]+?)}")
_fields_validator_exception_message: str = (
"{variable_names} must start with a lowercase letter, should only contain lowercase letters, "
"numbers, underscores, and have a single open and close bracket."
)
def is_valid_source_variable_name(input_str: str, raise_exception: bool = False) -> bool:
"""
Parameters
----------
input_str
String to see if it can be a source variable
raise_exception
        If True, raise InvalidVariableNameException instead of returning False
Returns
-------
True if it is. False otherwise.
Raises
------
InvalidVariableNameException
If raise_exception and output is False
"""
    # Add brackets around it to pretend it's a StringFormatter, and see if it captures
is_source_variable_name = len(re.findall(_fields_validator, f"{{{input_str}}}")) > 0
if not is_source_variable_name and raise_exception:
raise InvalidVariableNameException(_fields_validator_exception_message)
return is_source_variable_name
class StringFormatterValidator(Validator):
"""
String that can use
:class:`source variables <ytdl_sub.entries.variables.entry_variables.SourceVariables>`
and
:class:`overrides <ytdl_sub.config.preset_options.Overrides>`
for populating things like file paths and metadata.
.. code-block:: python
"{tv_show_file_name}.s{upload_year}.e{upload_month}{upload_day_padded}.{ext}"
is valid when using
:class:`youtube variables <ytdl_sub.entries.variables.youtube_variables.YoutubeVideoVariables>`
with the following overrides:
.. code-block:: yaml
presets:
my_example_preset:
overrides:
tv_show_file_name: "sweet_tv_show"
and would resolve to something like ``sweet_tv_show.s2022.e502.mp4``.
"""
_expected_value_type = str
_expected_value_type_name = "format string"
_variable_not_found_error_msg_formatter = (
"Format variable '{variable_name}' does not exist. Available variables: {available_fields}"
)
_max_format_recursion = 8
def __validate_and_get_format_variables(self) -> List[str]:
"""
Returns
-------
list[str]
List of format variables in the format string
Raises
------
ValidationException
If the format string contains invalid variable formatting
"""
open_bracket_count = self.format_string.count("{")
close_bracket_count = self.format_string.count("}")
if open_bracket_count != close_bracket_count:
raise self._validation_exception(
"Brackets are reserved for {variable_names} and should contain "
"a single open and close bracket.",
exception_class=StringFormattingException,
)
format_variables: List[str] = list(re.findall(_fields_validator, self.format_string))
if len(format_variables) != open_bracket_count:
raise self._validation_exception(
error_message=_fields_validator_exception_message,
exception_class=StringFormattingException,
)
for variable in format_variables:
if iskeyword(variable):
raise self._validation_exception(
f"'{variable}' is a Python keyword and cannot be used as a variable.",
exception_class=StringFormattingException,
)
return format_variables
def __init__(self, name, value: str):
super().__init__(name=name, value=value)
self.format_variables = self.__validate_and_get_format_variables()
@final
@property
def format_string(self) -> str:
"""
Returns
-------
The literal format string, unformatted.
"""
return self._value
def _apply_formatter(
self, formatter: "StringFormatterValidator", variable_dict: Dict[str, str]
) -> "StringFormatterValidator":
# Ensure the variable names exist within the entry and overrides
for variable_name in formatter.format_variables:
# If the variable exists, but is sanitized...
if (
variable_name.endswith("_sanitized")
and variable_name.removesuffix("_sanitized") in variable_dict
):
# Resolve just the non-sanitized version, then sanitize it
variable_dict[variable_name] = sanitize_filename(
StringFormatterValidator(
name=self._name, value=f"{{{variable_name.removesuffix('_sanitized')}}}"
).apply_formatter(variable_dict)
)
# If the variable doesn't exist, error
elif variable_name not in variable_dict:
available_fields = ", ".join(sorted(variable_dict.keys()))
raise self._validation_exception(
self._variable_not_found_error_msg_formatter.format(
variable_name=variable_name, available_fields=available_fields
),
exception_class=StringFormattingVariableNotFoundException,
)
return StringFormatterValidator(
name=self._name,
value=formatter.format_string.format(**OrderedDict(variable_dict)),
)
def apply_formatter(self, variable_dict: Dict[str, str]) -> str:
"""
Calls `format` on the format string using the variable_dict as input kwargs
Parameters
----------
variable_dict
kwargs to pass to the format string
Returns
-------
Format string formatted
"""
formatter = self
recursion_depth = 0
max_depth = self._max_format_recursion
while formatter.format_variables and recursion_depth < max_depth:
formatter = self._apply_formatter(formatter=formatter, variable_dict=variable_dict)
recursion_depth += 1
if formatter.format_variables:
raise self._validation_exception(
f"Attempted to format but failed after reaching max recursion depth of "
f"{max_depth}. Try to keep variables dependent on only one other variable at max. "
f"Unresolved variables: {', '.join(sorted(formatter.format_variables))}",
exception_class=StringFormattingException,
)
return formatter.format_string
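# Usage sketch (illustrative only; the variable names below are hypothetical):
#
#     formatter = StringFormatterValidator(name="example", value="{tv_show} - {episode_title}")
#     formatter.apply_formatter(
#         {"tv_show": "My Show", "episode_title": "{title}", "title": "Pilot"}
#     )
#     # -> "My Show - Pilot"  (variables that resolve to other variables are expanded
#     #    recursively, up to _max_format_recursion passes)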
# pylint: disable=line-too-long
class OverridesStringFormatterValidator(StringFormatterValidator):
"""
String that can `only` use :class:`overrides <ytdl_sub.config.preset_options.Overrides>`.
    Used in fields that do not touch the downloaded files themselves, but instead resolve to
    `single`, static values like
:func:`output_directory <ytdl_sub.config.preset_options.OutputOptions.output_directory>`
or the fields in
:class:`nfo_output_directory <ytdl_sub.plugins.output_directory_nfo_tags.OutputDirectoryNfoTagsOptions>`
"""
_variable_not_found_error_msg_formatter = (
"Override variable '{variable_name}' does not exist. For this field, ensure your override "
"variable does not contain any source variables - it is a requirement that this be a "
"static string. Available override variables: {available_fields}"
)
# pylint: enable=line-too-long
class ListFormatterValidator(ListValidator[StringFormatterValidator]):
_inner_list_type = StringFormatterValidator
class DictFormatterValidator(LiteralDictValidator):
"""
A dict made up of
:class:`~ytdl_sub.validators.string_formatter_validators.StringFormatterValidator`.
"""
_key_validator = StringFormatterValidator
def __init__(self, name, value):
super().__init__(name, value)
for key in self._keys:
self._value[key] = self._validate_key(key=key, validator=self._key_validator)
@property
def dict(self) -> Dict[str, StringFormatterValidator]:
"""Returns dict with string formatter values"""
return self._value
@property
def dict_with_format_strings(self) -> Dict[str, str]:
"""Returns dict with the format strings themselves"""
return {key: string_formatter.format_string for key, string_formatter in self.dict.items()}
class OverridesDictFormatterValidator(DictFormatterValidator):
"""
A dict made up of
:class:`~ytdl_sub.validators.string_formatter_validators.OverridesStringFormatterValidator`.
"""
    _key_validator = OverridesStringFormatterValidator
# ---- end of ytdl_sub/validators/string_formatter_validators.py ----
import os
from pathlib import Path
from typing import Any
from typing import Dict
from typing import Tuple
from ytdl_sub.config.defaults import MAX_FILE_NAME_BYTES
from ytdl_sub.utils.file_handler import get_file_extension
from ytdl_sub.utils.subtitles import SUBTITLE_EXTENSIONS
from ytdl_sub.validators.string_formatter_validators import OverridesStringFormatterValidator
from ytdl_sub.validators.string_formatter_validators import StringFormatterValidator
from ytdl_sub.validators.validators import StringValidator
class FFmpegFileValidator(StringValidator):
_expected_value_type_name = "ffmpeg dependency"
_ffmpeg_dependency = "ffmpeg"
def __init__(self, name: str, value: Any):
super().__init__(name, value)
if not os.path.isfile(self.value):
raise self._validation_exception(
f"Expects an {self._ffmpeg_dependency} executable at '{self.value}', but "
f"does not exist. See https://github.com/jmbannon/ytdl-sub#installation on how "
f"to install ffmpeg dependencies."
)
@property
def value(self) -> str:
"""Turn into a Path, then a string, to get correct directory separators"""
return str(Path(self._value))
class FFprobeFileValidator(FFmpegFileValidator):
_ffmpeg_dependency = "ffprobe"
class FilePathValidatorMixin:
_EXTENSION_BYTES = len("-thumb.jpg".encode("utf-8")) + 8
_DEFAULT_MAX_BASE_FILE_NAME_BYTES: int = MAX_FILE_NAME_BYTES - _EXTENSION_BYTES
_MAX_BASE_FILE_NAME_BYTES: int = _DEFAULT_MAX_BASE_FILE_NAME_BYTES
@classmethod
def set_max_file_name_bytes(cls, max_file_name_bytes: int) -> None:
"""Actually sets the max _base_ file name in bytes (excludes extension)"""
max_base_file_name_bytes = max_file_name_bytes - cls._EXTENSION_BYTES
        # Bound the base file name between 16 bytes and (MAX_FILE_NAME_BYTES - extension bytes)
max_base_file_name_bytes = max(max_base_file_name_bytes, 16)
max_base_file_name_bytes = min(
max_base_file_name_bytes, MAX_FILE_NAME_BYTES - cls._EXTENSION_BYTES
)
cls._MAX_BASE_FILE_NAME_BYTES = max_base_file_name_bytes
@classmethod
def _is_file_name_too_long(cls, file_name: str) -> bool:
return len(file_name.encode("utf-8")) > cls._MAX_BASE_FILE_NAME_BYTES
@classmethod
def _get_extension_split(cls, file_name: str) -> Tuple[str, str]:
ext = get_file_extension(file_name)
return file_name[: -len(ext)], ext
@classmethod
def _truncate_file_name(cls, file_name: str) -> str:
file_sub_name, file_ext = cls._get_extension_split(file_name)
desired_size = cls._MAX_BASE_FILE_NAME_BYTES - len(file_ext.encode("utf-8")) - 1
while len(file_sub_name.encode("utf-8")) > desired_size:
file_sub_name = file_sub_name[:-1]
return f"{file_sub_name}.{file_ext}"
@classmethod
    def _maybe_truncate_file_path(cls, file_path: Path) -> str:
        """Truncates the file name component of the path if it exceeds the max file name bytes"""
file_directory, file_name = os.path.split(Path(file_path))
if cls._is_file_name_too_long(file_name):
return str(Path(file_directory) / cls._truncate_file_name(file_name))
return str(file_path)
# pylint: disable=line-too-long
class StringFormatterFileNameValidator(StringFormatterValidator, FilePathValidatorMixin):
"""
Same as a
:class:`StringFormatterValidator <ytdl_sub.validators.string_formatter_validators.StringFormatterValidator>`
but ensures the file name does not exceed the OS limit (typically 255 bytes). If it does exceed,
it will preserve the extension and truncate the end of the file name.
"""
# pylint: enable=line-too-long
_expected_value_type_name = "filepath"
@classmethod
def _is_file_name_too_long(cls, file_name: str) -> bool:
return len(file_name.encode("utf-8")) > cls._MAX_BASE_FILE_NAME_BYTES
@classmethod
def _get_extension_split(cls, file_name: str) -> Tuple[str, str]:
"""
Returns
-------
file_name, ext (including .)
"""
if file_name.endswith(".info.json"):
ext = ".info.json"
elif file_name.endswith("-thumb.jpg"):
ext = "-thumb.jpg"
elif any(file_name.endswith(f".{subtitle_ext}") for subtitle_ext in SUBTITLE_EXTENSIONS):
file_name_split = file_name.split(".")
            ext = f".{file_name_split[-1]}"
# Try to capture .lang.ext
if len(file_name_split) > 2 and len(file_name_split[-2]) < 6:
ext = f".{file_name_split[-2]}.{file_name_split[-1]}"
else:
ext = f".{file_name.rsplit('.', maxsplit=1)[-1]}"
return file_name[: -len(ext)], ext
@classmethod
def _truncate_file_name(cls, file_name: str) -> str:
file_sub_name, file_ext = cls._get_extension_split(file_name)
while len(file_sub_name.encode("utf-8")) > cls._MAX_BASE_FILE_NAME_BYTES:
file_sub_name = file_sub_name[:-1]
return f"{file_sub_name}{file_ext}"
def apply_formatter(self, variable_dict: Dict[str, str]) -> str:
"""Turn into a Path, then a string, to get correct directory separators"""
file_path = Path(super().apply_formatter(variable_dict))
return self._maybe_truncate_file_path(file_path)
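# Usage sketch (illustrative; the exact cutoff depends on MAX_FILE_NAME_BYTES, assumed here
# to be an OS-typical limit of roughly 255 bytes):
#
#     formatter = StringFormatterFileNameValidator(name="example", value="{title}.{ext}")
#     formatter.apply_formatter({"title": "a" * 300, "ext": "mp4"})
#     # -> the base name is truncated so the ".mp4" extension is preserved and the whole
#     #    file name fits within the configured byte limit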
class OverridesStringFormatterFilePathValidator(OverridesStringFormatterValidator):
_expected_value_type_name = "static filepath"
def apply_formatter(self, variable_dict: Dict[str, str]) -> str:
"""Turn into a Path, then a string, to get correct directory separators"""
        return os.path.realpath(super().apply_formatter(variable_dict))
# ---- end of ytdl_sub/validators/file_path_validators.py ----
import copy
from abc import ABC
from typing import Any
from typing import Dict
from typing import Generic
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import final
from ytdl_sub.utils.exceptions import ValidationException
ValueT = TypeVar("ValueT", bound=object)
ValidationExceptionT = TypeVar("ValidationExceptionT", bound=ValidationException)
ValidatorT = TypeVar("ValidatorT", bound="Validator")
def validation_exception(
name: str,
error_message: str | Exception,
exception_class: Type[ValidationExceptionT] = ValidationException,
) -> ValidationExceptionT:
"""
Parameters
----------
name
Name of the validator
error_message
Error message to include in the ValidationException
exception_class
Class of the exception
Returns
-------
Validation exception with a consistent prefix.
"""
prefix = f"Validation error in {name}: "
return exception_class(f"{prefix}{error_message}")
class Validator(ABC):
"""
Used to validate the value of a python object. This is the 'base' class that will first
check that the value's type matches the expected type. Validators that inherit from this should
perform their validation within the __init__.
"""
# If the value is not this expected type, error
_expected_value_type: Type = object
# When raising an error, call the type this value instead of its python name
_expected_value_type_name: Optional[str] = None
@classmethod
def partial_validate(cls, name: str, value: Any) -> None:
"""
Parameters
----------
name
Name of the validator
value
Value of the validator
"""
_ = cls(name=name, value=value)
def __init__(self, name: str, value: Any):
self._name = name
self._value = copy.deepcopy(value) # Always deep copy to avoid editing references
if not isinstance(self._value, self._expected_value_type):
expected_value_type_name = self._expected_value_type_name or str(
self._expected_value_type
)
raise self._validation_exception(
error_message=f"should be of type {expected_value_type_name}."
)
def _validation_exception(
self,
error_message: str | Exception,
exception_class: Type[ValidationExceptionT] = ValidationException,
) -> ValidationExceptionT:
"""
Parameters
----------
error_message
Error message to include in the ValidationException
Returns
-------
Validation exception with a consistent prefix.
"""
return validation_exception(self._name, error_message, exception_class)
class ValueValidator(Validator, ABC, Generic[ValueT]):
"""
Native type validator that returns the value as-is
"""
@property
def value(self) -> ValueT:
"""
Returns
-------
The value, unmodified
"""
return self._value
class BoolValidator(ValueValidator[bool]):
_expected_value_type: Type = bool
_expected_value_type_name = "boolean"
class StringValidator(ValueValidator[str]):
_expected_value_type = str
_expected_value_type_name = "string"
class FloatValidator(ValueValidator[float]):
_expected_value_type = (int, float)
_expected_value_type_name = "float"
class IntValidator(ValueValidator[int]):
_expected_value_type = int
_expected_value_type_name = "int"
class ListValidator(Validator, ABC, Generic[ValidatorT]):
"""
Validates a list of objects to validate
"""
_expected_value_type = list
_expected_value_type_name = "list"
_inner_list_type: Type[ValidatorT]
def __init__(self, name, value):
# If the value isn't actually a list, but a single value with the same type as the
# _inner_list_type, cast it to a list with a single element
if isinstance(value, self._inner_list_type._expected_value_type):
value = [value]
super().__init__(name, value)
self._list: List[ValidatorT] = [
self._inner_list_type(name=f"{name}.{i+1}", value=val)
for i, val in enumerate(self._value)
]
@property
def list(self) -> List[ValidatorT]:
"""
Returns
-------
The list
"""
return self._list
class StringListValidator(ListValidator[StringValidator]):
_expected_value_type_name = "string list"
_inner_list_type = StringValidator
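# Usage sketch for ListValidator's single-value casting (illustrative):
#
#     StringListValidator(name="tags", value="music").list            # one StringValidator
#     StringListValidator(name="tags", value=["music", "live"]).list  # two StringValidators
#     [v.value for v in StringListValidator(name="tags", value=["a", "b"]).list]  # -> ["a", "b"]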
class DictValidator(Validator):
"""
    Validates dictionary-based fields. Errors refer to them as 'object's since this could be
    validating YAML.
"""
_expected_value_type = dict
_expected_value_type_name = "object"
def __init__(self, name, value):
super().__init__(name, value)
self.__validator_dict: Dict[str, Validator] = {}
@final
@property
def _root_name(self) -> str:
"""
Returns
-------
"first" from the first.element.of.the.name
"""
return self._name.split(".")[0]
@final
@property
def _dict(self) -> dict:
"""
Returns
-------
Dictionary value
"""
return self._value
@final
@property
def _validator_dict(self) -> Dict[str, Validator]:
"""
Returns dict containing names and validators of any keys that were validated.
This allows top-level validators to recursively search a dict validator.
"""
return self.__validator_dict
@final
@property
def _keys(self) -> List[str]:
"""
Returns
-------
Sorted list of dictionary keys
"""
return sorted(list(self._dict.keys()))
@final
def _validate_key(
self,
key: str,
validator: Type[ValidatorT],
default: Optional[Any] = None,
) -> ValidatorT:
"""
Parameters
----------
key
            Name of the key in the dict to validate
validator
The validator to use for the key's value
default
If the key's value does not exist, use this value, unless it is None.
Returns
-------
An instance of the specified validator
"""
if key not in self._dict and default is None:
raise self._validation_exception(f"{key} is missing when it should be present.")
validator_name = f"{self._name}.{key}" if self._name else key
validator_instance = validator(
name=validator_name,
value=self._dict.get(key, default),
)
self.__validator_dict[validator_name] = validator_instance
return validator_instance
@final
def _validate_key_if_present(
self,
key: str,
validator: Type[ValidatorT],
default: Optional[Any] = None,
) -> Optional[ValidatorT]:
"""
If the key does not exist in the dict, and no default is provided, return None.
Otherwise, validate the key.
Parameters
----------
key
Name of they key in the dict to validate
validator
The validator to use for the key's value
default
If the key's value does not exist, use this value.
Returns
-------
An instance of the specified validator
"""
# If the key does not exist and default is None, return None
if key not in self._dict and default is None:
return None
# If the key exists but is None (null in YAML) or empty string, return None
if key in self._dict and self._dict[key] in (None, ""):
return None
return self._validate_key(key=key, validator=validator, default=default)
@final
@classmethod
def _partial_validate_key(
cls, name: str, value: Any, key: str, validator: Type[ValidatorT]
) -> None:
value_dict = DictValidator(name=name, value=value)
if key in value_dict._dict:
validator.partial_validate(name=f"{name}.{key}", value=value_dict._dict[key])
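# Usage sketch (illustrative): concrete validators subclass DictValidator and validate their
# keys in __init__. The class and keys below are hypothetical examples.
#
#     class ExampleOptions(DictValidator):
#         def __init__(self, name, value):
#             super().__init__(name, value)
#             self._title = self._validate_key(key="title", validator=StringValidator)
#             self._count = self._validate_key_if_present(key="count", validator=IntValidator, default=1)
#
#     ExampleOptions(name="example", value={"title": "hi"})  # "count" falls back to the default of 1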
class LiteralDictValidator(DictValidator):
"""DictValidator with exposed dict and keys method"""
@property
def dict(self) -> Dict:
"""Returns the entire dict"""
return super()._dict
@property
def keys(self) -> List[str]:
"""Returns a sorted list of the dict's keys"""
        return super()._keys
# ---- end of ytdl_sub/validators/validators.py ----
from abc import ABC
from collections import defaultdict
from typing import Dict
from typing import List
from ytdl_sub.validators.strict_dict_validator import StrictDictValidator
from ytdl_sub.validators.string_formatter_validators import DictFormatterValidator
from ytdl_sub.validators.string_formatter_validators import ListFormatterValidator
from ytdl_sub.validators.string_formatter_validators import StringFormatterValidator
from ytdl_sub.validators.validators import DictValidator
from ytdl_sub.validators.validators import ListValidator
class NfoTagsWithAttributesValidator(StrictDictValidator):
_required_keys = {"attributes", "tag"}
def __init__(self, name, value):
super().__init__(name, value)
self._attributes = self._validate_key(key="attributes", validator=DictFormatterValidator)
self._tag = self._validate_key(key="tag", validator=StringFormatterValidator)
@property
def attributes(self) -> DictFormatterValidator:
"""
Returns
-------
The attributes for this NFO tag
"""
return self._attributes
@property
def tag(self) -> StringFormatterValidator:
"""
Returns
-------
The value for this NFO tag
"""
return self._tag
class NfoTagsWithAttributesListValidator(ListValidator[NfoTagsWithAttributesValidator]):
"""TagsWithAttributes list for the entry NFO validator"""
_inner_list_type = NfoTagsWithAttributesValidator
class NfoTagsValidator(DictValidator, ABC):
def __init__(self, name, value):
super().__init__(name, value)
self._string_tags: Dict[str, List[StringFormatterValidator]] = defaultdict(list)
self._attribute_tags: Dict[str, List[NfoTagsWithAttributesValidator]] = defaultdict(list)
for key, tag_value in self._dict.items():
# Turn each value into a list if it's not
if not isinstance(tag_value, list):
tag_value = [tag_value]
if isinstance(tag_value[0], str):
self._string_tags[key].extend(
self._validate_key(
key=key,
validator=ListFormatterValidator,
).list
)
elif isinstance(tag_value[0], dict):
self._attribute_tags[key].extend(
self._validate_key(key=key, validator=NfoTagsWithAttributesListValidator).list
)
else:
raise self._validation_exception(
"must either be a single or list of string/attribute object"
)
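    # Input sketch (illustrative; the keys and variables below are hypothetical): a tags mapping like
    #
    #     {"genre": ["Comedy", "Drama"],
    #      "title": "{title}",
    #      "season": {"attributes": {"number": "{season_number}"}, "tag": "{season_title}"}}
    #
    # populates string_tags with "genre" and "title", and attribute_tags with "season".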
@property
def string_tags(self) -> Dict[str, List[StringFormatterValidator]]:
"""
Returns
-------
Tags with no attributes
"""
return self._string_tags
@property
def attribute_tags(self) -> Dict[str, List[NfoTagsWithAttributesValidator]]:
"""
Returns
-------
Tags with attributes
"""
        return self._attribute_tags
# ---- end of ytdl_sub/validators/nfo_validators.py ----
import xml.etree.ElementTree as et
from dataclasses import dataclass
from typing import Any
from typing import Dict
from typing import List
from typing import Union
@dataclass
class XmlElement:
text: str
attributes: Dict[str, str]
def to_dict_value(self) -> Union[str, Dict[str, Any]]:
"""
Returns
-------
Only the tag if no attributes, otherwise a dict containing both attributes and the tag
"""
if not self.attributes:
return self.text
return {
"attributes": self.attributes,
"tag": self.text,
}
def _to_max_3_byte_utf8_char(char: str) -> str:
return "□" if len(char.encode("utf-8")) > 3 else char
def to_max_3_byte_utf8_string(string: str) -> str:
"""
Parameters
----------
string
input string
Returns
-------
Casted unicode string
"""
return "".join(_to_max_3_byte_utf8_char(char) for char in string)
def to_max_3_byte_utf8_dict(string_dict: Dict[str, str]) -> Dict[str, str]:
"""
Parameters
----------
string_dict
Input string dict
Returns
-------
Casted dict
"""
return {
to_max_3_byte_utf8_string(key): to_max_3_byte_utf8_string(value)
for key, value in string_dict.items()
}
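# Usage sketch (illustrative): characters that need more than 3 bytes in UTF-8 (e.g. most
# emoji) are replaced with a placeholder; everything else is kept.
#
#     to_max_3_byte_utf8_string("title 😀")   # -> "title □"
#     to_max_3_byte_utf8_string("café 中")    # -> "café 中" (unchanged)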
def to_xml(nfo_dict: Dict[str, List[XmlElement]], nfo_root: str) -> bytes:
"""
Transforms a dict to XML
Parameters
----------
nfo_dict
XML contents
nfo_root
Root of the XML
Returns
-------
XML bytes
"""
xml_root = et.Element(nfo_root)
for key, xml_elems in sorted(nfo_dict.items()):
for xml_elem in xml_elems:
sorted_attr = dict(sorted(xml_elem.attributes.items()))
sub_element = et.SubElement(xml_root, key, sorted_attr)
sub_element.text = xml_elem.text
et.indent(tree=xml_root, space=" ", level=0)
    return et.tostring(element=xml_root, encoding="utf-8", xml_declaration=True)
# ---- end of ytdl_sub/utils/xml.py ----
import contextlib
import io
import logging
import sys
import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import List
from typing import Optional
from ytdl_sub import __local_version__
from ytdl_sub.utils.exceptions import ValidationException
from ytdl_sub.utils.file_handler import FileHandler
@dataclass
class LoggerLevel:
name: str
level: int
logging_level: int
class LoggerLevels:
"""
Custom log levels
"""
QUIET = LoggerLevel(name="quiet", level=0, logging_level=logging.WARNING) # Only warnings
INFO = LoggerLevel(name="info", level=10, logging_level=logging.INFO) # ytdl-sub info logs
VERBOSE = LoggerLevel(name="verbose", level=20, logging_level=logging.INFO) # ytdl-sub + yt-dlp
DEBUG = LoggerLevel(
name="debug", level=30, logging_level=logging.DEBUG
) # ytdl-sub + yt-dlp debug logs
@classmethod
def all(cls) -> List[LoggerLevel]:
"""
Returns
-------
All log levels
"""
return [cls.QUIET, cls.INFO, cls.VERBOSE, cls.DEBUG]
@classmethod
def from_str(cls, name: str) -> LoggerLevel:
"""
Parameters
----------
name
The log level name
Raises
------
ValueError
Name is not a valid logger level
"""
for logger_level in cls.all():
if name == logger_level.name:
return logger_level
raise ValueError("Invalid logger level name")
@classmethod
def names(cls) -> List[str]:
"""
Returns
-------
All log level names
"""
return [logger_level.name for logger_level in cls.all()]
class StreamToLogger(io.StringIO):
def __init__(self, logger: logging.Logger, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logger = logger
def write(self, __s: str) -> int:
"""
Writes to the logger and stream
"""
if __s != "\n":
self._logger.info(__s.removesuffix("\n"))
return super().write(__s)
class Logger:
# The level set via CLI arguments
_LOGGER_LEVEL: LoggerLevel = LoggerLevels.DEBUG
# Ignore 'using with' warning since this will be cleaned up later
# pylint: disable=R1732
_DEBUG_LOGGER_FILE = tempfile.NamedTemporaryFile(prefix="ytdl-sub.", delete=False)
# pylint: enable=R1732
# Whether the final exception lines were added to the debug log
_LOGGED_EXIT_EXCEPTION: bool = False
# Keep track of all Loggers created
_LOGGERS: List[logging.Logger] = []
@classmethod
def debug_log_filename(cls) -> str:
"""
Returns
-------
File name of the debug log file
"""
return cls._DEBUG_LOGGER_FILE.name
@classmethod
def set_log_level(cls, log_level_name: str):
"""
Parameters
----------
log_level_name
Name of the log level to set
"""
cls._LOGGER_LEVEL = LoggerLevels.from_str(name=log_level_name)
@classmethod
def _get_formatter(cls) -> logging.Formatter:
"""
Returns
-------
Formatter for all ytdl-sub loggers
"""
return logging.Formatter("[%(name)s] %(message)s")
@classmethod
def _get_stdout_handler(cls) -> logging.StreamHandler:
"""
Returns
-------
Logger handler
"""
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(cls._LOGGER_LEVEL.logging_level)
handler.setFormatter(cls._get_formatter())
return handler
@classmethod
def _get_debug_file_handler(cls) -> logging.FileHandler:
handler = logging.FileHandler(filename=cls.debug_log_filename(), encoding="utf-8")
handler.setLevel(logging.DEBUG)
handler.setFormatter(cls._get_formatter())
return handler
@classmethod
def _get(
cls, name: Optional[str] = None, stdout: bool = True, debug_file: bool = True
) -> logging.Logger:
logger_name = "ytdl-sub"
if name:
logger_name += f":{name}"
logger = logging.Logger(name=logger_name, level=logging.DEBUG)
if stdout:
logger.addHandler(cls._get_stdout_handler())
if debug_file:
logger.addHandler(cls._get_debug_file_handler())
cls._LOGGERS.append(logger)
return logger
@classmethod
def get(cls, name: Optional[str] = None) -> logging.Logger:
"""
Parameters
----------
name
Optional. Name of the logger which is included in the prefix like [ytdl-sub:<name>].
If None, the prefix is just [ytdl-sub]
Returns
-------
A configured logger
"""
return cls._get(name=name, stdout=True, debug_file=True)
@classmethod
@contextlib.contextmanager
def handle_external_logs(cls, name: Optional[str] = None) -> None:
"""
Suppresses all stdout and stderr logs. Intended to suppress other packages logs.
Will always write these logs to the debug logger file.
Parameters
----------
name
Optional. Name of the logger which is included in the prefix like [ytdl-sub:<name>].
            If None, the prefix is just [ytdl-sub]
"""
logger = cls._get(
name=name, stdout=cls._LOGGER_LEVEL.level >= LoggerLevels.VERBOSE.level, debug_file=True
)
with StreamToLogger(logger=logger) as redirect_stream:
try:
with contextlib.redirect_stdout(new_target=redirect_stream):
with contextlib.redirect_stderr(new_target=redirect_stream):
yield
finally:
redirect_stream.flush()
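    # Usage sketch (illustrative): wrap third-party calls so their stdout/stderr are captured
    # and routed through the ytdl-sub loggers instead of printing directly.
    #
    #     with Logger.handle_external_logs(name="yt-dlp"):
    #         ...  # e.g. yt-dlp invocations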
@classmethod
def log_exit_exception(cls, exception: Exception, log_filepath: Optional[Path] = None):
"""
Performs the final log before exiting from an error
Parameters
----------
exception
The exception to log
log_filepath
Optional. The filepath to the debug logs
"""
if not cls._LOGGED_EXIT_EXCEPTION:
logger = cls.get()
# Log validation exceptions as-is
if isinstance(exception, ValidationException):
logger.error(exception)
# For other uncaught errors, log as bug:
else:
logger.exception("An uncaught error occurred:")
logger.error(
"Version %s\nPlease upload the error log file '%s' and make a Github "
"issue at https://github.com/jmbannon/ytdl-sub/issues with your config and "
"command/subscription yaml file to reproduce. Thanks for trying ytdl-sub!",
__local_version__,
log_filepath if log_filepath else Logger.debug_log_filename(),
)
cls._LOGGED_EXIT_EXCEPTION = True
@classmethod
def cleanup(cls):
"""
Cleans up debug log file left behind
"""
for logger in cls._LOGGERS:
for handler in logger.handlers:
handler.close()
cls._DEBUG_LOGGER_FILE.close()
FileHandler.delete(cls.debug_log_filename())
        cls._LOGGED_EXIT_EXCEPTION = False
# ---- end of ytdl_sub/utils/logger.py ----
import errno
import os
from contextlib import contextmanager
from pathlib import Path
from ytdl_sub.config.config_file import ConfigFile
from ytdl_sub.utils.exceptions import ValidationException
from ytdl_sub.utils.logger import Logger
from ytdl_sub.utils.system import IS_WINDOWS
logger = Logger.get()
if IS_WINDOWS:
@contextmanager
def working_directory_lock(config: ConfigFile):
"""Windows does not support working directory lock"""
logger.info(
"Working directory lock not supported in Windows. "
"Ensure only one instance of ytdl-sub runs at once using working directory %s",
config.config_options.working_directory,
)
yield
else:
import fcntl
@contextmanager
def working_directory_lock(config: ConfigFile):
"""
Create and try to lock the file /tmp/working_directory_name
Raises
------
ValidationException
Lock is acquired from another process running ytdl-sub in the same working directory
OSError
Other lock error occurred
"""
working_directory_path = Path(os.getcwd()) / config.config_options.working_directory
lock_file_path = (
Path(os.getcwd())
/ config.config_options.lock_directory
/ str(working_directory_path).replace("/", "_")
)
try:
lock_file = open(lock_file_path, "w", encoding="utf-8")
except FileNotFoundError as exc:
# pylint: disable=line-too-long
raise ValidationException(
"Failed to create a file-lock to prevent multiple instances of ytdl-sub from "
"colliding with each other. If you get this error, it typically means it tried to "
"create the file in a directory that is not a part of the same filesystem that "
"ytdl-sub is running on. See "
"https://ytdl-sub.readthedocs.io/en/latest/config.html#ytdl_sub.config.config_validator.ConfigOptions.lock_directory "
"on how to change the directory that this lock gets written to."
) from exc
# pylint: enable=line-too-long
try:
fcntl.lockf(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as exc:
if exc.errno in (errno.EACCES, errno.EAGAIN):
raise ValidationException(
"Cannot run two instances of ytdl-sub "
"with the same working directory at the same time"
) from exc
lock_file.close()
raise exc
try:
yield
finally:
fcntl.flock(lock_file, fcntl.LOCK_UN)
            lock_file.close()
# ---- end of ytdl_sub/utils/file_lock.py ----
import re
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from ytdl_sub.entries.entry import Entry
from ytdl_sub.entries.variables.kwargs import CHAPTERS
from ytdl_sub.entries.variables.kwargs import YTDL_SUB_CUSTOM_CHAPTERS
from ytdl_sub.utils.file_handler import FileMetadata
class Timestamp:
# Captures the following formats:
# 0:00 title
# 00:00 title
# 1:00:00 title
# 01:00:00 title
    # where capture group 1 is the timestamp
TIMESTAMP_REGEX = re.compile(r"((?:\d\d:)?(?:\d:)?(?:\d)?\d:\d\d)")
@classmethod
def _normalize_timestamp_str(cls, timestamp_str: str) -> str:
match = cls.TIMESTAMP_REGEX.match(timestamp_str)
if not match:
raise ValueError(f"Cannot parse youtube timestamp '{timestamp_str}'")
timestamp = match.group(1)
match len(timestamp):
case 4: # 0:00
timestamp = f"00:0{timestamp}"
case 5: # 00:00
timestamp = f"00:{timestamp}"
case 7: # 0:00:00
timestamp = f"0{timestamp}"
case _:
pass
assert len(timestamp) == 8
return timestamp
def __init__(self, timestamp_sec: int):
self._timestamp_sec = timestamp_sec
@property
def timestamp_sec(self) -> int:
"""
Returns
-------
Timestamp in seconds
"""
return self._timestamp_sec
@property
def _hours_minutes_seconds(self) -> Tuple[int, int, int]:
seconds = self.timestamp_sec
hours = int(seconds / 3600)
seconds -= hours * 3600
minutes = int(seconds / 60)
seconds -= minutes * 60
return hours, minutes, seconds
@property
def readable_str(self) -> str:
"""
Returns
-------
        The timestamp with leading zero components trimmed, i.e. '0:SS', 'M:SS', or 'H:MM:SS' format.
"""
hours, minutes, seconds = self._hours_minutes_seconds
if hours:
return f"{str(hours)}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
if minutes:
return f"{str(minutes)}:{str(seconds).zfill(2)}"
return f"0:{str(seconds).zfill(2)}"
@property
def standardized_str(self) -> str:
"""
Returns
-------
The timestamp in 'HH:MM:SS' format
"""
hours, minutes, seconds = self._hours_minutes_seconds
return f"{str(hours).zfill(2)}:{str(minutes).zfill(2)}:{str(seconds).zfill(2)}"
@classmethod
def from_seconds(cls, timestamp_sec: int) -> "Timestamp":
"""
Parameters
----------
timestamp_sec
Timestamp in number of seconds
"""
return cls(timestamp_sec=timestamp_sec)
@classmethod
def from_str(cls, timestamp_str: str) -> "Timestamp":
"""
Parameters
----------
timestamp_str
Timestamp in the form of "HH:MM:SS"
Raises
------
ValueError
Invalid timestamp string format
"""
hour_minute_second = cls._normalize_timestamp_str(timestamp_str).split(":")
if len(hour_minute_second) != 3:
raise ValueError("Youtube timestamp must be in the form of 'HH:MM:SS'")
        hour, minute, second = hour_minute_second
try:
return cls(timestamp_sec=(int(hour) * 3600) + (int(minute) * 60) + int(second))
except ValueError as cast_exception:
raise ValueError(
"Youtube timestamp must be in the form of 'HH:MM:SS'"
) from cast_exception
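# Usage sketch for Timestamp (illustrative):
#
#     Timestamp.from_str("1:02:03").timestamp_sec    # -> 3723
#     Timestamp.from_seconds(75).readable_str        # -> "1:15"
#     Timestamp.from_seconds(75).standardized_str    # -> "00:01:15"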
class Chapters:
"""
Represents a list of (timestamps, titles)
"""
def __init__(
self,
timestamps: List[Timestamp],
titles: List[str],
):
self.timestamps = timestamps
self.titles = titles
for idx in range(len(timestamps) - 1):
if timestamps[idx].timestamp_sec >= timestamps[idx + 1].timestamp_sec:
raise ValueError("Timestamps must be in ascending order")
def contains_any_chapters(self) -> bool:
"""
Returns
-------
True if there are chapters. False otherwise.
"""
return len(self.timestamps) > 0
def contains_zero_timestamp(self) -> bool:
"""
Returns
-------
True if the first timestamp starts at 0. False otherwise.
"""
return self.timestamps[0].timestamp_sec == 0
def to_file_metadata_dict(self) -> Dict:
"""
Returns
-------
Metadata dict
"""
return {ts.readable_str: title for ts, title in zip(self.timestamps, self.titles)}
def to_file_metadata(self, title: Optional[str] = None) -> FileMetadata:
"""
Parameters
----------
title
Optional title
Returns
-------
Chapter metadata in the format of { readable_timestamp_str: title }
"""
return FileMetadata.from_dict(
value_dict=self.to_file_metadata_dict(),
title=title,
sort_dict=False, # timestamps + titles are already sorted
)
@classmethod
def from_string(cls, input_str: str) -> "Chapters":
"""
From a string (description or comment), try to extract Chapters.
        The scraping logic is simple: if three or more lines contain timestamps, use every
        timestamped line as a chapter. Remove the timestamp portion to get the chapter title.
Parameters
----------
input_str
String to scrape
Returns
-------
Chapters
Could be empty
"""
timestamps: List[Timestamp] = []
titles: List[str] = []
# Try to accumulate chapters by parsing lines individually
for line in input_str.split("\n"):
# Timestamp captured, store it
if match := Timestamp.TIMESTAMP_REGEX.search(line):
timestamp_str = match.group(1)
timestamps.append(Timestamp.from_str(timestamp_str))
# Remove timestamp and surrounding whitespace from it
title_str = re.sub(f"\\s*{re.escape(timestamp_str)}\\s*", " ", line).strip()
titles.append(title_str)
        # If three or more timestamps were parsed, return them
if len(timestamps) >= 3:
return Chapters(timestamps=timestamps, titles=titles)
# Otherwise return empty chapters
return Chapters(timestamps=[], titles=[])
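    # Usage sketch (illustrative): a description with three or more timestamped lines is
    # scraped into chapters.
    #
    #     description = "0:00 Intro\n1:30 Verse\n3:45 Outro"
    #     chapters = Chapters.from_string(description)
    #     chapters.titles                       # -> ["Intro", "Verse", "Outro"]
    #     chapters.timestamps[1].readable_str   # -> "1:30"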
@classmethod
def from_entry_chapters(cls, entry: Entry) -> "Chapters":
"""
Parameters
----------
entry
Entry with yt-dlp chapter metadata
Returns
-------
Chapters object
"""
timestamps: List[Timestamp] = []
titles: List[str] = []
if entry.kwargs_contains(CHAPTERS):
for chapter in entry.kwargs_get(CHAPTERS, []):
timestamps.append(Timestamp.from_seconds(int(float(chapter["start_time"]))))
titles.append(chapter["title"])
elif entry.kwargs_contains(YTDL_SUB_CUSTOM_CHAPTERS):
for start_time, title in entry.kwargs_get(YTDL_SUB_CUSTOM_CHAPTERS, {}).items():
timestamps.append(Timestamp.from_str(start_time))
titles.append(title)
return Chapters(timestamps=timestamps, titles=titles)
@classmethod
def from_empty(cls) -> "Chapters":
"""
Initialize empty chapters
"""
return Chapters(timestamps=[], titles=[])
def __len__(self) -> int:
"""
Returns
-------
Number of chapters
"""
return len(self.timestamps)
def is_empty(self) -> bool:
"""
Returns
-------
True if no chapters. False otherwise.
"""
        return len(self) == 0
# ---- end of ytdl_sub/utils/chapters.py ----
import hashlib
import json
import os
import shutil
from collections import defaultdict
from pathlib import Path
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Union
from ytdl_sub.utils.subtitles import SUBTITLE_EXTENSIONS
def get_file_extension(file_name: Path | str) -> str:
"""
Returns the file extension from a file name. Tries to return .info.json and .lang.subtitle
extensions if detected, otherwise splits on the last `.` and returns the latter part
"""
    # Cast to str so that Path inputs (allowed by the type hint) work with the string
    # methods used below
    file_name = str(file_name)
    if file_name.endswith(".info.json"):
return "info.json"
if any(file_name.endswith(f".{subtitle_ext}") for subtitle_ext in SUBTITLE_EXTENSIONS):
file_name_split = file_name.split(".")
ext = file_name_split[-1]
# Try to capture .lang.ext
if len(file_name_split) > 2 and len(file_name_split[-2]) < 6:
ext = f"{file_name_split[-2]}.{file_name_split[-1]}"
return ext
return file_name.rsplit(".", maxsplit=1)[-1]
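# Usage sketch (illustrative; assumes "srt" is one of SUBTITLE_EXTENSIONS):
#
#     get_file_extension("video.info.json")   # -> "info.json"
#     get_file_extension("video.en.srt")      # -> "en.srt"
#     get_file_extension("video.mp4")         # -> "mp4"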
def get_file_md5_hash(full_file_path: Path | str) -> str:
"""
Parameters
----------
full_file_path
Path to the file
Returns
-------
md5 hash of its contents
"""
with open(full_file_path, "rb") as file:
return hashlib.md5(file.read()).hexdigest()
def files_equal(full_file_path_a: Path | str, full_file_path_b: Path | str) -> bool:
"""
Parameters
----------
full_file_path_a
full_file_path_b
Returns
-------
True if the files are equal in contents. False otherwise.
"""
if not (os.path.isfile(full_file_path_a) and os.path.isfile(full_file_path_b)):
return False
if os.path.getsize(full_file_path_a) != os.path.getsize(full_file_path_b):
return False
if get_file_md5_hash(full_file_path_a) != get_file_md5_hash(full_file_path_b):
return False
return True
class FileMetadata:
"""
Stores pretty-printed information about a file. Each line in the metadata represents a newline
"""
def __init__(self, metadata: Optional[Union[str, List[str]]] = None):
self.metadata = []
if isinstance(metadata, str):
self.metadata = [metadata]
elif isinstance(metadata, list):
self.metadata = metadata
def append(self, line: str) -> "FileMetadata":
"""
Parameters
----------
line
Line of metadata to append
"""
self.metadata.append(line)
return self
def extend(self, other: Optional["FileMetadata"]) -> "FileMetadata":
"""
Parameters
----------
other
Other metadata to extend to this one in its entirety
"""
if other is not None:
self.metadata.extend(other.metadata)
return self
@classmethod
def from_dict(
cls, value_dict: Dict[str, Any], title: Optional[str] = None, sort_dict: bool = True
) -> "FileMetadata":
"""
Parameters
----------
value_dict
Dict of things to print indented
title
Optional. Title line to put above the dict
sort_dict
Whether to sort dicts in the value_dict. Defaults to true.
"""
if title:
value_dict = {title: value_dict}
if sort_dict:
value_dict = json.loads(json.dumps(value_dict, sort_keys=True, ensure_ascii=False))
def _indent_lines(value: str, indent: int) -> str:
if "\n" not in value:
return value
output_str = ""
_indent = " " * indent
for line in value.split("\n"):
output_str += f"{_indent}{line}\n"
return f"{output_str.rstrip()}\n"
def _single_value(value: Any) -> Optional[str]:
if isinstance(value, list) and len(value) == 1:
return _single_value(value=value[0])
if isinstance(value, (dict, list)):
return None
if isinstance(value, str) and "\n" in value:
return None
return value
def _recursive_lines(value: Any, indent: int = 0) -> str:
_indent = " " * indent
output_str = ""
if isinstance(value, dict):
for key, sub_value in value.items():
single_sub_value = _single_value(sub_value)
if single_sub_value is not None:
output_str += f"{_indent}{key}: {single_sub_value}\n"
else:
output_str += f"{_indent}{key}:\n"
output_str += _indent_lines(_recursive_lines(sub_value), indent=indent + 2)
elif isinstance(value, list):
for sub_value in value:
single_sub_value = _single_value(sub_value)
if single_sub_value is not None:
output_str += f"{_indent}- {single_sub_value}\n"
else:
output_str += f"{_indent}- \n"
output_str += _indent_lines(_recursive_lines(sub_value), indent=indent + 2)
elif isinstance(value, str): # multi-line string
output_str += _indent_lines(value, indent=indent)
else:
assert False, "should never reach here"
return output_str
out = _recursive_lines(value_dict).rstrip().split("\n")
return cls(metadata=out)
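# Usage sketch for FileMetadata.from_dict (illustrative):
#
#     FileMetadata.from_dict({"Chapters": {"0:00": "Intro", "1:30": "Verse"}}).metadata
#     # -> ["Chapters:", "  0:00: Intro", "  1:30: Verse"]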
class FileHandlerTransactionLog:
"""
Tracks file 'transactions' performed by a FileHandler
"""
@classmethod
def format_path_str(cls, path_str: Path | str) -> str:
"""
Returns
-------
str formatted to always look like a unix string
"""
return str(path_str).replace(os.sep, "/")
def __init__(self):
self.files_created: Dict[str, FileMetadata] = {}
self.files_modified: Dict[str, FileMetadata] = {}
self.files_removed: Set[str] = set()
@property
def is_empty(self) -> bool:
"""
Returns
-------
True if no transaction logs are recorded. False otherwise
"""
return (
len(self.files_created) == 0
and len(self.files_removed) == 0
and len(self.files_modified) == 0
)
def log_created_file(
self, file_name: str, file_metadata: Optional[FileMetadata] = None
) -> "FileHandlerTransactionLog":
"""
Adds a created file to the transaction log
Parameters
----------
file_name
Name of the file in the output directory
file_metadata
Optional. If the file has metadata, add it to the transaction log
"""
if not file_metadata:
file_metadata = FileMetadata()
self.files_created[file_name] = file_metadata
return self
def log_modified_file(
self, file_name: str, file_metadata: Optional[FileMetadata] = None
) -> "FileHandlerTransactionLog":
"""
Adds a modified file to the transaction log
Parameters
----------
file_name
Name of the file in the output directory
file_metadata
Optional. If the file has metadata, add it to the transaction log
"""
if not file_metadata:
file_metadata = FileMetadata()
self.files_modified[file_name] = file_metadata
return self
def log_removed_file(self, file_name: str) -> "FileHandlerTransactionLog":
"""
Records a file removed from the output directory
Parameters
----------
file_name
Name of the file in the output directory getting removed
"""
self.files_removed.add(file_name)
return self
@classmethod
def _indent_metadata_line(cls, line: str, indent: int) -> str:
# Do not indent empty lines
rstrip_line = line.rstrip()
indent_str = " " * indent
return f"{indent_str}{rstrip_line}" if rstrip_line else ""
@classmethod
def _to_output_message(
cls, file_set_title: str, file_set: Dict[str, Optional[FileMetadata]], output_directory: str
) -> List[str]:
if not file_set:
return []
directory_set: Dict[str, Dict[str, Optional[FileMetadata]]] = defaultdict(dict)
for file_path, file_metadata in sorted(file_set.items()):
file_directory = os.path.dirname(Path(output_directory) / file_path)
file_name = os.path.basename(Path(output_directory) / file_path)
# Format file directories/names to always look like unix
file_directory = cls.format_path_str(file_directory)
file_name = cls.format_path_str(file_name)
directory_set[file_directory][file_name] = file_metadata
lines: List[str] = [file_set_title, "-" * 40]
for directory, directory_file_set in directory_set.items():
lines.append(directory)
for file_name, file_metadata in directory_file_set.items():
lines.append(cls._indent_metadata_line(file_name, indent=2))
if not file_metadata:
continue
lines.extend(
[cls._indent_metadata_line(line, indent=4) for line in file_metadata.metadata]
)
return lines
def to_output_message(self, output_directory: str) -> str:
"""
Parameters
----------
output_directory
Path to the output directory. Included in the output message
Returns
-------
The output message to show users what was recorded in the transaction log
"""
lines: List[str] = []
if self.files_created:
lines.extend(
self._to_output_message(
file_set_title="Files created:",
file_set=self.files_created,
output_directory=output_directory,
)
)
if self.files_modified:
# Add a blank line to separate created files
if self.files_created:
lines.append("")
lines.extend(
self._to_output_message(
file_set_title="Files modified:",
file_set=self.files_modified,
output_directory=output_directory,
)
)
if self.files_removed:
# Add a blank line to separate created/removed files
if self.files_created or self.files_modified:
lines.append("")
lines.extend(
self._to_output_message(
file_set_title="Files removed:",
file_set={file_name: None for file_name in self.files_removed},
output_directory=output_directory,
)
)
if self.is_empty:
lines.append(
f"No new, modified, or removed files in '{self.format_path_str(output_directory)}'"
)
return "\n".join(lines)
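# Usage sketch (illustrative; the paths below are hypothetical):
#
#     log = FileHandlerTransactionLog()
#     log.log_created_file("Season 1/episode.mp4")
#     log.log_removed_file("old.mp4")
#     print(log.to_output_message(output_directory="/tv_shows/My Show"))
#     # prints "Files created:" and "Files removed:" sections, grouped by directory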
class FileHandler:
"""
Performs and tracks all file moving/copying/deleting
"""
def __init__(self, working_directory: str, output_directory: str, dry_run: bool):
self.dry_run = dry_run
self.working_directory = working_directory
self.output_directory = output_directory
self._file_handler_transaction_log = FileHandlerTransactionLog()
@property
def file_handler_transaction_log(self) -> FileHandlerTransactionLog:
"""
Returns
-------
Transaction logs of this file handler
"""
return self._file_handler_transaction_log
@classmethod
def copy(cls, src_file_path: Union[str, Path], dst_file_path: Union[str, Path]):
"""
Parameters
----------
src_file_path
Source file
dst_file_path
Destination file
"""
shutil.copyfile(src=src_file_path, dst=dst_file_path)
@classmethod
def move(cls, src_file_path: Union[str, Path], dst_file_path: Union[str, Path]):
"""
Parameters
----------
src_file_path
Source file
dst_file_path
Destination file
Raises
------
OSError
Cross-device link workaround
"""
try:
shutil.move(src=src_file_path, dst=dst_file_path)
except OSError:
# Invalid cross-device link
            # Can happen from using os.rename under the hood, which requires the two files to be on the
# same filesystem. Work around it by copying and deleting the file
cls.copy(src_file_path, dst_file_path)
cls.delete(src_file_path)
@classmethod
def delete(cls, file_path: Union[str, Path]):
"""
Parameters
----------
file_path
File to delete
"""
if os.path.isfile(file_path):
os.remove(file_path)
def move_file_to_output_directory(
self,
file_name: str,
output_file_name: str,
file_metadata: Optional[FileMetadata] = None,
copy_file: bool = False,
):
"""
Copies a file from the working directory to the output directory.
All file copies from working to output directory should use this function for tracking and
handling dry-run logic.
Parameters
----------
file_name
File in the working directory
output_file_name
Desired output file name in the output_directory
file_metadata
Optional. Metadata to record to the transaction log for this file
copy_file
Optional. If True, copy the file. Move otherwise
Returns
-------
bool
True if modified. False otherwise.
"""
is_modified = False
source_file_path = Path(self.working_directory) / file_name
output_file_path = Path(self.output_directory) / output_file_name
# output file exists, and it's not marked as created already, see if we modify it
if (
os.path.isfile(output_file_path)
and output_file_name not in self.file_handler_transaction_log.files_created
):
if not files_equal(source_file_path, output_file_path):
self.file_handler_transaction_log.log_modified_file(
file_name=output_file_name, file_metadata=file_metadata
)
is_modified = True
        # output file does not already exist, so this creates a new file
else:
self.file_handler_transaction_log.log_created_file(
file_name=output_file_name, file_metadata=file_metadata
)
if not self.dry_run:
os.makedirs(os.path.dirname(output_file_path), exist_ok=True)
if copy_file:
self.copy(src_file_path=source_file_path, dst_file_path=output_file_path)
else:
self.move(src_file_path=source_file_path, dst_file_path=output_file_path)
# Simulate the file being moved during dry run by deleting it
elif self.dry_run and not copy_file:
FileHandler.delete(source_file_path)
return is_modified
def delete_file_from_output_directory(self, file_name: str):
"""
Deletes a file from the output directory. All file deletions should use this function
for tracking and handling dry-run logic.
Parameters
----------
file_name
File in the output directory to delete
"""
file_path = Path(self.output_directory) / file_name
exists = os.path.isfile(file_path)
if exists:
self._file_handler_transaction_log.log_removed_file(file_name)
if not self.dry_run:
                self.delete(file_path=file_path)
# ---- end of ytdl_sub/utils/file_handler.py ----
import subprocess
import tempfile
from typing import Dict
from typing import List
from typing import Optional
from ytdl_sub.utils.chapters import Chapters
from ytdl_sub.utils.exceptions import ValidationException
from ytdl_sub.utils.file_handler import FileHandler
from ytdl_sub.utils.logger import Logger
logger = Logger.get(name="ffmpeg")
_FFMPEG_METADATA_SPECIAL_CHARS = ["=", ";", "#", "\n", "\\"]
def _ffmpeg_metadata_escape(str_to_escape: str) -> str:
# backslash at the end of the list is intentional
for special_char in _FFMPEG_METADATA_SPECIAL_CHARS:
str_to_escape = str_to_escape.replace(special_char, f"\\{special_char}")
return str_to_escape
class FFMPEG:
_FFMPEG_PATH: str = ""
_FFPROBE_PATH: str = ""
@classmethod
def set_paths(cls, ffmpeg_path: str, ffprobe_path: str) -> None:
"""Set ffmpeg paths for usage"""
cls._FFMPEG_PATH = ffmpeg_path
cls._FFPROBE_PATH = ffprobe_path
@classmethod
def ffmpeg_path(cls) -> str:
"""Ensure the ffmpeg path has been set and return it"""
assert cls._FFMPEG_PATH, "ffmpeg has not been set"
return cls._FFMPEG_PATH
@classmethod
def ffprobe_path(cls) -> str:
"""Ensure the ffprobe path has been set and return it"""
assert cls._FFPROBE_PATH, "ffprobe has not been set"
return cls._FFPROBE_PATH
@classmethod
def _ensure_installed(cls):
try:
subprocess.check_output([cls.ffmpeg_path(), "-version"])
except subprocess.CalledProcessError as subprocess_error:
raise ValidationException(
"Trying to use a feature which requires ffmpeg, but it cannot be found"
) from subprocess_error
@classmethod
def tmp_file_path(cls, relative_file_path: str, extension: Optional[str] = None) -> str:
"""
Parameters
----------
relative_file_path
Path of input file that is going to be modified
extension
Desired output extension. Defaults to input file's extension
Returns
-------
Temporary file path for ffmpeg output
"""
if extension is None:
extension = relative_file_path.split(".")[-1]
return f"{relative_file_path}.out.{extension}"
@classmethod
def run(cls, ffmpeg_args: List[str], timeout: Optional[float] = None) -> None:
"""
Runs an ffmpeg command. Should not include 'ffmpeg' as the beginning argument.
Parameters
----------
ffmpeg_args:
Arguments to pass to ffmpeg. Each one will be separated by a space.
timeout
Optional. Timeout in seconds for the ffmpeg subprocess
"""
cls._ensure_installed()
cmd = [cls.ffmpeg_path()]
cmd.extend(ffmpeg_args)
logger.debug("Running %s", " ".join(cmd))
with Logger.handle_external_logs(name="ffmpeg"):
subprocess.run(cmd, check=True, capture_output=True, timeout=timeout)
def _create_metadata_chapter_entry(start_sec: int, end_sec: int, title: str) -> List[str]:
return [
"",
"[CHAPTER]",
"TIMEBASE=1/1000",
f"START={start_sec * 1000}",
f"END={end_sec * 1000}",
f"title={_ffmpeg_metadata_escape(title)}",
]
def _create_metadata_chapters(chapters: Chapters, file_duration_sec: int) -> List[str]:
lines: List[str] = []
if not chapters.contains_zero_timestamp():
lines += _create_metadata_chapter_entry(
start_sec=0,
end_sec=chapters.timestamps[0].timestamp_sec,
title="Intro", # TODO: make this configurable
)
for idx in range(len(chapters.timestamps) - 1):
lines += _create_metadata_chapter_entry(
start_sec=chapters.timestamps[idx].timestamp_sec,
end_sec=chapters.timestamps[idx + 1].timestamp_sec,
title=chapters.titles[idx],
)
# Add the last chapter using the file duration
lines += _create_metadata_chapter_entry(
start_sec=chapters.timestamps[-1].timestamp_sec,
end_sec=file_duration_sec,
title=chapters.titles[-1],
)
return lines
def set_ffmpeg_metadata_chapters(
file_path: str, chapters: Optional[Chapters], file_duration_sec: int
) -> None:
"""
Sets ffmetadata chapters to a file. Note that this will (I think) wipe all prior
metadata.
Parameters
----------
file_path
Full path to the file to add metadata to
chapters
Chapters to embed in the file. If a chapter for 0:00 does not exist, one is created
file_duration_sec
Length of the file in seconds
"""
lines = [";FFMETADATA1"]
if chapters:
lines += _create_metadata_chapters(chapters=chapters, file_duration_sec=file_duration_sec)
tmp_file_path = FFMPEG.tmp_file_path(relative_file_path=file_path)
with tempfile.NamedTemporaryFile(
mode="w", suffix=".txt", encoding="utf-8", delete=False
) as metadata_file:
metadata_file.write("\n".join(lines))
metadata_file.flush()
try:
FFMPEG.run(
[
"-i",
file_path,
"-i",
metadata_file.name,
"-map",
"0",
"-dn", # ignore data streams
"-map_chapters",
"1",
"-bitexact", # for reproducibility
"-codec",
"copy",
tmp_file_path,
]
)
FileHandler.move(tmp_file_path, file_path)
finally:
FileHandler.delete(metadata_file.name)
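# For illustration (hypothetical titles and times): for chapters at 0:00 "Intro talk" and
# 1:30 "Main part" in a 300 second file, the FFMETADATA document written above looks like:
#   ;FFMETADATA1
#
#   [CHAPTER]
#   TIMEBASE=1/1000
#   START=0
#   END=90000
#   title=Intro talk
#
#   [CHAPTER]
#   TIMEBASE=1/1000
#   START=90000
#   END=300000
#   title=Main part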
def add_ffmpeg_metadata_key_values(file_path: str, key_values: Dict[str, str]) -> None:
"""
Parameters
----------
file_path
File to add metadata key/values to
key_values
The key/values to add
"""
tmp_file_path = FFMPEG.tmp_file_path(file_path)
ffmpeg_args = [
"-i",
file_path,
"-map",
"0",
"-dn", # ignore data streams
]
for key, value in key_values.items():
ffmpeg_args.extend(["-metadata", f"{key}={value}"])
ffmpeg_args.extend(["-codec", "copy", tmp_file_path])
FFMPEG.run(ffmpeg_args)
FileHandler.move(tmp_file_path, file_path)
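# Rough illustration (hypothetical file and tag): calling
#   add_ffmpeg_metadata_key_values("song.mp3", {"artist": "Some Artist"})
# builds and runs approximately
#   ffmpeg -i song.mp3 -map 0 -dn -metadata artist=Some Artist -codec copy song.mp3.out.mp3
# and then moves the temporary output back over song.mp3.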
import logging
import os
import tempfile
from subprocess import CalledProcessError
from typing import Optional
from urllib.request import urlopen
from ytdl_sub.entries.entry import Entry
from ytdl_sub.utils.ffmpeg import FFMPEG
from ytdl_sub.utils.file_handler import FileHandler
from ytdl_sub.utils.logger import Logger
from ytdl_sub.utils.retry import retry
class ThumbnailTypes:
LATEST_ENTRY = "latest_entry"
logger: logging.Logger = Logger.get("thumbnail")
def try_convert_download_thumbnail(entry: Entry) -> None:
"""
Converts an entry's downloaded thumbnail into jpg format.
Logs a warning if the thumbnail is not found or fails to convert
Parameters
----------
entry
Entry with the thumbnail
"""
download_thumbnail_path: Optional[str] = entry.try_get_ytdlp_download_thumbnail_path()
download_thumbnail_path_as_jpg = entry.get_download_thumbnail_path()
# If it was already converted, do not convert again
if entry.is_thumbnail_downloaded():
return
if not download_thumbnail_path:
logger.warning("Thumbnail for '%s' was not downloaded", entry.title)
return
if download_thumbnail_path != download_thumbnail_path_as_jpg:
try:
FFMPEG.run(
["-y", "-bitexact", "-i", download_thumbnail_path, download_thumbnail_path_as_jpg]
)
except CalledProcessError:
logger.warning("Failed to convert thumbnail for '%s' to jpg", entry.title)
finally:
FileHandler.delete(download_thumbnail_path)
@retry(times=3, exceptions=(Exception,))
def download_and_convert_url_thumbnail(
thumbnail_url: Optional[str], output_thumbnail_path: str
) -> Optional[bool]:
"""
Downloads and converts a thumbnail from a url into a jpg
Parameters
----------
thumbnail_url
URL of the thumbnail
output_thumbnail_path
Thumbnail file destination after its converted to jpg
Returns
-------
True to indicate it converted the thumbnail from url. None if the retry failed.
"""
if not thumbnail_url:
return None
# let the request time out after 1 second
with urlopen(thumbnail_url, timeout=1.0) as file:
with tempfile.NamedTemporaryFile(delete=False) as thumbnail:
thumbnail.write(file.read())
try:
os.makedirs(os.path.dirname(output_thumbnail_path), exist_ok=True)
tmp_output_path = FFMPEG.tmp_file_path(
relative_file_path=thumbnail.name, extension="jpg"
)
# Add timeout of 1 second in case ffmpeg hangs from a bad thumbnail
FFMPEG.run(["-y", "-bitexact", "-i", thumbnail.name, tmp_output_path], timeout=1)
# Have FileHandler handle the move, which may cross devices
FileHandler.move(tmp_output_path, output_thumbnail_path)
finally:
FileHandler.delete(tmp_output_path)
FileHandler.delete(thumbnail.name)
return True
import json
import os.path
import threading
import time
from json import JSONDecodeError
from pathlib import Path
from typing import Optional
from typing import Set
from ytdl_sub.utils.logger import Logger
logger = Logger.get(name="downloader")
class LogEntriesDownloadedListener(threading.Thread):
def __init__(self, working_directory: str, log_prefix: str):
"""
To be run in a thread while downloading via ytdl-sub. Listens for new .info.json files in the
working directory, checks the extractor value, and if it matches the input arg, logs the
title.
Parameters
----------
working_directory
subscription download working directory
log_prefix
The message to print prefixed to the title, i.e. '{log_prefix} {title}'
"""
threading.Thread.__init__(self)
self.working_directory = working_directory
self.log_prefix = log_prefix
self.complete = False
self._files_read: Set[str] = set()
@classmethod
def _get_title_from_info_json(cls, path: Path) -> Optional[str]:
try:
with open(path, "r", encoding="utf-8") as file:
file_json = json.load(file)
except JSONDecodeError:
# swallow the error since this is only printing logs
return None
return file_json.get("title")
@classmethod
def _is_info_json(cls, path: Path) -> bool:
if path.is_file():
_, ext = os.path.splitext(path)
return ext == ".json"
return False
def loop(self) -> None:
"""
Read new files in the directory and print their titles
"""
for path in Path(self.working_directory).rglob("*"):
if path.name not in self._files_read and self._is_info_json(path):
title = self._get_title_from_info_json(path)
self._files_read.add(path.name)
if title:
logger.info("%s %s", self.log_prefix, title)
def run(self):
"""
Loops over new files and prints their titles
"""
while not self.complete:
self.loop()
time.sleep(0.1)
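# Illustrative usage sketch (hypothetical values, not part of the original module):
#   listener = LogEntriesDownloadedListener(working_directory=".wd", log_prefix="[downloaded]")
#   listener.start()
#   ...  # run the yt-dlp download here
#   listener.complete = True
#   listener.join()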
# ytdl
Yet another CLI based YouTube downloader tool for linux.
[](https://badge.fury.io/py/ytdl)
[](https://travis-ci.org/ndaidong/ytdl)
[](https://coveralls.io/github/ndaidong/ytdl?branch=master)
### Features:
- Download only the necessary stream, not the whole video
- Download all videos or audios belonging to a playlist
- Download with custom names and bounds (`--since`, `--limit`)
Just gather your favourite videos into a playlist, then let `ytdl` download them overnight.
## Prerequisites
- Fedora 32+, Ubuntu 20+, Debian 10+
- [python](https://www.python.org/) 3.6.5 or newer
- [ffmpeg](https://ffmpeg.org/) 4.0 or newer
- [YouTube API key](https://developers.google.com/youtube/registering_an_application)
## Install
We recommend using [pipx](https://pipxproject.github.io/pipx/):
```bash
pipx install ytdl
ytdl config
```
However, `pip` may work too:
```bash
pip install ytdl
# but don't install as root:
sudo pip install ytdl
```
Building from source requires [poetry](https://python-poetry.org/):
```bash
git clone [email protected]:ndaidong/ytdl.git && cd ytdl
poetry install
# use raw script
poetry run python main.py info
poetry run python main.py [command] [arguments]
# build wheel to `./dist` folder
poetry build
# then install it
pipx install dist/ytdl-VERSION-py3-none-any.whl
# test it
ytdl info
```
## CLIs
### Basic commands
| Command | Description | Shortcut |
|--|--|--|
| `ytdl config KEY VALUE` | Set config value | `c` |
| `ytdl config KEY` | Show config property | |
| `ytdl playlist PLAYLIST_URL` | Get playlist index | `p` |
| `ytdl video VIDEO_URL` | Download a video | `v` |
| `ytdl audio VIDEO_URL` | Download audio only | `a` |
In most cases, `VIDEO_URL` and `PLAYLIST_URL` can be replaced with the video ID or playlist ID.
### Advanced usage
#### Config
There are 2 properties to configure: `api_key` and `store_dir`.
Initially, `api_key` is empty and you have to set it before using the other features.
```bash
# set new `api_key`
ytdl config api_key YOUR_OWN_YOUTUBE_API_KEY
# change `store_dir` to new path
ytdl config store_dir /storage/downloads/youtube
# get the current value of `api_key`
ytdl config api_key
# show all
ytdl config
```
By default, `store_dir` is set to `/home/{YOUR_USER_NAME}/ytdl_files`; you should change it to a more appropriate place.
#### Playlist
Note that this command does not download actual video/audio, but a list of indexed items.
```bash
# get playlist metadata into `{store_dir}/{title}.json`
# this file contains a list of videos with their ID and title to download later
ytdl playlist https://www.youtube.com/playlist?list=PLAYLIST_ID
# get playlist metadata into `{store_dir}/my_custom_playlist_name.json`
ytdl playlist https://www.youtube.com/playlist?list=PLAYLIST_ID my_custom_playlist_name
```
For example, if we index the playlist [Linux Tips and Tricks](https://www.youtube.com/playlist?list=PLSmXPSsgkZLsw-vEwve1O7w-Row9TIVqi),
the indexed file looks like this:

Then we will have some powerful ways to download the videos in this list with `ytdl video` or `ytdl audio`.
#### Video
Download a single video file.
```bash
# download a video file to `{store_dir}/video/{VIDEO_TITLE}.mp4`
ytdl video https://www.youtube.com/watch?v=VIDEO_ID
# custom name
ytdl video https://www.youtube.com/watch?v=VIDEO_ID my_custom_video_name
```
To download multiple items from an indexed playlist, refer to the following arguments:
- `--index_file`: path to the playlist index file (required)
- `--since`: video ID of the video you want to start downloading from
- `--limit`: number of items to download, counting from `since` or the beginning of the list
- `--prefix_name`: prefix used to auto-name the downloaded files
- `--prefix_num`: starting number used to auto-name the downloaded files
Examples:
```bash
# download all videos from saved playlist index file above
# these video files will be stored in `{store_dir}/video`
ytdl video --index_file "/path/to/Linux Tips and Tricks.json"
# download 5 videos from saved playlist index file above, starting from the first item
ytdl video --index_file "/path/to/Linux Tips and Tricks.json" --limit 5
# download 5 videos from saved playlist index file above, with prefix name
ytdl video --index_file "/path/to/Linux Tips and Tricks.json" --limit 5 --prefix_name "Linux Tutorial"
# downloaded videos should look like "Linux Tutorial - 1.mp4", "Linux Tutorial - 2.mp4" and so on
# prefix_name is useful when you want to add these files to an existing collection for a different purpose
# download 5 videos from saved playlist index file above, with prefix name and prefix number
ytdl video --index_file "/path/to/Linux Tips and Tricks.json" --limit 5 --prefix_name "Linux Tutorial" --prefix_num 25
# this is useful for playlists that are split into multiple parts
# in this case, your series "Linux Tutorial" already had 24 items, so numbering continues from 25 onwards
# downloaded videos should look like "Linux Tutorial - 25.mp4", "Linux Tutorial - 26.mp4" and so on
# similar to above command, but start from given item
ytdl video --index_file "/path/to/Linux Tips and Tricks.json" --since VIDEO_ID --limit 5 --prefix_name "Linux Tutorial" --prefix_num 25
```
While downloading video, the stream with the highest `resolution` will be selected.
#### Audio
This is similar to `ytdl video`, but downloads only the audio file.
While downloading, the stream with the highest `abr` (average bitrate) will be selected.
```bash
# download an audio file to `{store_dir}/audio/{VIDEO_TITLE}.mp3`
ytdl audio https://www.youtube.com/watch?v=VIDEO_ID
# custom name
ytdl audio https://www.youtube.com/watch?v=VIDEO_ID my_custom_audio_name
```
To download multiple items from an indexed playlist, refer to the following arguments:
- `--index_file`: path to the playlist index file (required)
- `--since`: video ID of the video you want to start downloading from
- `--limit`: number of items to download, counting from `since` or the beginning of the list
- `--prefix_name`: prefix used to auto-name the downloaded files
- `--prefix_num`: starting number used to auto-name the downloaded files
Examples:
```bash
# download all audios from saved playlist index file above
# these audio files will be stored in `{store_dir}/audio`
ytdl audio --index_file "/path/to/Linux Tips and Tricks.json"
# download 5 audios from saved playlist index file above, starting from the first item
ytdl audio --index_file "/path/to/Linux Tips and Tricks.json" --limit 5
# download 5 audios from saved playlist index file above, with prefix name
ytdl audio --index_file "/path/to/Linux Tips and Tricks.json" --limit 5 --prefix_name "Linux Tutorial"
# downloaded audios should look like "Linux Tutorial - 1.mp3", "Linux Tutorial - 2.mp3" and so on
# prefix_name is useful when you want to add these files to an existing collection for a different purpose
# download 5 audios from saved playlist index file above, with prefix name and prefix number
ytdl audio --index_file "/path/to/Linux Tips and Tricks.json" --limit 5 --prefix_name "Linux Tutorial" --prefix_num 25
# this is useful for playlists that are split into multiple parts
# in this case, your series "Linux Tutorial" already had 24 items, so numbering continues from 25 onwards
# downloaded audios should look like "Linux Tutorial - 25.mp3", "Linux Tutorial - 26.mp3" and so on
# similar to above command, but start from given item
ytdl audio --index_file "/path/to/Linux Tips and Tricks.json" --since VIDEO_ID --limit 5 --prefix_name "Linux Tutorial" --prefix_num 25
```
The downloaded stream will be converted to .mp3 with `ffmpeg`.
## Dependencies
This lib was built on top of the following packages:
| Dependency | License |
|--|--|
| [pytube3](https://github.com/get-pytube/pytube3) | MIT |
| [python-youtube](https://github.com/sns-sdks/python-youtube) | MIT |
| [python-fire](https://github.com/google/python-fire) | Apache License v2 |
## Test
```bash
git clone [email protected]:ndaidong/ytdl.git && cd ytdl
poetry install
./run_test.sh
```
# License
The MIT License (MIT)
# YT-DLD
### Just another youtube downloader
##### [*] Description
Run it, enter a link, and choose among the available options to download
### [+] Installation
##### Install dependencies (git, python)
- For Debian (Ubuntu, Kali-Linux, Parrot)
- ```sudo apt install git python3 -y```
- For Arch (Manjaro)
- ```sudo pacman -S git python3 --noconfirm```
- For Redhat(Fedora)
- ```sudo dnf install git python3 -y```
- For Termux
- ```pkg install git python3 -y```
##### Clone this repository
- ```git clone https://github.com/KasRoudra/ytdld```
##### Enter the directory
- ```cd ytdld```
##### Install all modules
- ```pip3 install -r files/requirements.txt --break-system-packages```
##### Run the tool
- ```python3 ytdld.py```
### Pip
- `pip3 install ytdld` [For Termux]
- `sudo pip3 install ytdld --break-system-packages` [For Linux]
- `ytdld`
### [~] Options
```
usage: ytdld.py [-h] [-u URL] [-d DIRECTORY] [-t TYPE] [-f FORMAT]
[-q QUALITY] [-b]
options:
-h, --help show this help message and exit
-u URL, --url URL Youtube url of the media
-d DIRECTORY, --directory DIRECTORY
Download path, the folder in which media will be saved
-t TYPE, --type TYPE Type of the media
-f FORMAT, --format FORMAT
File Format or Extension of the media
-q QUALITY, --quality QUALITY
Resolution or Bitrate the media
-b, --best Automatically download the media of best resolution or
bitrate
```
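For example (the URL is a placeholder; type, format and quality must be allowed by your config):
```
python3 ytdld.py -u "https://youtu.be/VIDEO_ID" -t audio -f mp4 -q 128kbps
python3 ytdld.py -u "https://youtu.be/VIDEO_ID" --best
```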
### [*] Features
- Supports arguments for file type and media quality
- Best quality can be automatically chosen
- Defaults can be set in the config file (see the example below)
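A sketch of the default config, created at `~/.config/ytdld/config.json` on the first run (values mirror the tool's built-in defaults; adjust them to control which media types, formats, qualities and download directory are used):
```
{
  "audio": {
    "allow": true,
    "formats": ["mp4", "webm"],
    "qualities": ["48kbps", "50kbps", "70kbps", "128kbps", "160kbps"]
  },
  "video": {
    "allow": true,
    "formats": ["mp4", "webm"],
    "qualities": ["144p", "360p", "480p", "720p", "1080p", "1440p", "2180p"]
  },
  "mixed": { "allow": true },
  "directory": "/home/<user>/Download"
}
```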
### [-] Requirements
- `Python(3)`
- `pytube`
- `questionary`
- `kasutils`
### [*] Support
#### Want to show support? Just spread the word and smash the star button
###### Donate BTC: ***3Lx8ikQQgZZpmFJzHDBuQHXzLXMeWMcZF3***
## [~] Find Me on :
- [](https://github.com/KasRoudra)
- [](mailto:[email protected])
- [](https://facebook.com/KasRoudra)
- [](https://m.me/KasRoudra)
- [](https://t.me/KasRoudra)
from argparse import ArgumentParser
from importlib import (
import_module as eximport
)
from subprocess import (
CalledProcessError,
CompletedProcess,
run
)
from os import (
getenv,
mkdir
)
from os.path import (
dirname,
isdir,
isfile
)
from json import (
dumps,
loads
)
def shell(command: str, capture_output=False) -> CompletedProcess:
"""
Run shell commands in python
"""
return run(command, shell=True, capture_output=capture_output, check=True)
def inst_module(pip_module: str):
"""
Try to install pip modules
"""
try:
eximport(pip_module)
return True
except ImportError:
try:
shell(f"pip3 install {pip_module} --break-system-packages")
except (ImportError, CalledProcessError):
return False
try:
eximport(pip_module)
return True
except ImportError:
return False
modules = ["kasutils", "pytube", "questionary"]
for module in modules:
if not inst_module(module):
print(f"{module} can't be installed. Install it manually!")
exit()
from kasutils import (
BLACK,
BLUE,
CYAN,
ENCODING,
GREEN,
PURPLE,
RED,
YELLOW,
ask,
error,
info,
info2,
success,
cat,
center_text,
clear,
copy,
delete,
grep,
is_online,
lolcat,
move,
readable,
rename,
sed,
sprint,
)
from pytube import (
YouTube
)
from pytube.cli import (
on_progress
)
from questionary import (
select,
text
)
VERSION = "0.0.1"
logo = f"""
{RED}__ _______ ____ _ ____
{BLUE}\ \ / /_ _| | _ \| | | _ \
{GREEN} \ V / | |_____| | | | | | | | |
{CYAN} | | | |_____| |_| | |___| |_| |
{YELLOW} |_| |_| |____/|_____|____/
{CYAN}{" "*28}[{BLUE}v{PURPLE}{VERSION[2:]}{CYAN}]
{BLUE}{" "*20}[{CYAN}By {GREEN}KasRoudra{BLUE}]
"""
home = getenv("HOME")
config_file = f"{home}/.config/ytdld/config.json"
argparser = ArgumentParser()
argparser.add_argument(
"-u",
"--url",
help="Youtube url of the media"
)
argparser.add_argument(
"-d",
"--directory",
help="Download path, the folder in which media will be saved"
)
argparser.add_argument(
"-t",
"--type",
help="Type of the media"
)
argparser.add_argument(
"-f",
"--format",
help="File Format or Extension of the media"
)
argparser.add_argument(
"-q",
"--quality",
help="Resolution or Bitrate the media"
)
argparser.add_argument(
"-b",
"--best",
help="Automatically download the media of best resolution or bitrate",
action="store_true"
)
args = argparser.parse_args()
arg_url = args.url
arg_dir = args.directory
arg_type = args.type
arg_format = args.format
arg_quality = args.quality
best = args.best
def parse_url():
"""
Check for argument url
If not available ask for url input
"""
url = text("Enter youtube url:").ask() if arg_url is None else arg_url
# url = "https://youtube.com/watch?v=dCxSsr5xuL8&feature=share9"
if url is None:
return
elif "https://youtube.com/" in url:
ytid = url.split("=")[1].split("&")[0]
if not len(ytid)==11:
print(f"{error}NotYouTubeURLError: This is not a valid youtube url!")
return
# https://youtu.be/rdC1_jZtWKE
elif "https://youtu.be/" in url:
ytid = url.split("/")[-1]
if not len(ytid)==11:
print(f"{error}NotYouTubeURLError: This is not a valid youtube url!")
return
else:
pass
return url
def parse_config() -> dict:
"""
Read config or create one if doesn't exists
"""
if isfile(config_file):
with open(config_file, encoding=ENCODING) as conf:
config = loads(conf.read())
else:
config = {
"audio": {
"allow": True,
"formats": [
"mp4",
"webm"
],
"qualities": [
"48kbps",
"50kbps",
"70kbps",
"128kbps",
"160kbps"
]
},
"video": {
"allow": True,
"formats": [
"mp4",
"webm"
],
"qualities": [
"144p",
"360p",
"480p",
"720p",
"1080p",
"1440p",
"2180p"
]
},
"mixed": {
"allow": True,
},
"directory": f"{home}/Download"
}
if not isdir(dirname(config_file)):
mkdir(dirname(config_file))
with open(config_file, "w", encoding=ENCODING) as conf:
conf.write(dumps(config, indent=4))
return config
def get_choice(streams) -> list:
"""
Returns a list of choices based on streams.
The index of each choice refers to the item at the same index in the streams list
"""
choices = []
for stream in streams:
extension = stream.mime_type.split("/")[1]
quality = stream.resolution or stream.abr
if stream.is_progressive:
filetype = "Mixed"
else:
filetype = stream.type.title()
if stream.filesize < 1024:
filesize = f"{round(stream.filesize, 2)} B"
if stream.filesize > 1024:
filesize = f"{round(stream.filesize_kb, 2)} KB"
if stream.filesize > 1024 ** 2:
filesize = f"{round(stream.filesize_mb, 2)} MB"
if stream.filesize > 1024 ** 3:
filesize = f"{round(stream.filesize_gb, 2)} GB"
choices.append(f"{filetype}: {stream.title}-{quality}.{extension} {filesize}")
return choices
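# Illustrative result (hypothetical title and size): a generated choice string looks like
# "Video: Some Title-720p.mp4 24.53 MB"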
def filter_streams(streams):
"""
Filter streams by given configs or argument
"""
filtered_streams = []
config = parse_config()
mixed = config["mixed"]["allow"] if arg_type is None else arg_type == "mixed"
audio = config["audio"]
video = config["video"]
formats = audio["formats"] + video["formats"] if arg_format is None else [arg_format]
qualities = audio["qualities"] + video["qualities"] if arg_quality is None else [arg_quality]
for stream in streams:
extension = stream.mime_type.split("/")[1]
quality = stream.resolution or stream.abr
allowed = config[stream.type]["allow"] if arg_type is None else arg_type == stream.type
progressive = mixed and stream.is_progressive
formative = extension in formats
qualitive = quality in qualities
if progressive or allowed:
if formative and qualitive:
filtered_streams.append(stream)
if best:
best_quality = 0
best_extension = "webm" if arg_format is None else arg_format
best_streams = []
for best_stream in filtered_streams:
pattern = "([0-9]*)"
cur_qua = best_stream.resolution or best_stream.abr
cur_quality = int(grep(pattern, cur_qua))
cur_extenstion = best_stream.mime_type.split("/")[1]
print(cur_extenstion == best_extension)
print(cur_quality > best_quality)
if cur_extenstion == best_extension and cur_quality > best_quality:
best_quality = cur_quality
best_streams = [best_stream]
print(best_streams)
filtered_streams = best_streams
return filtered_streams
def download_stream(stream):
"""
Download video from stream
"""
config = parse_config()
directory = config["directory"] if arg_dir is None else arg_dir
if not isdir(directory):
mkdir(directory)
quality = stream.resolution or stream.abr
extension = stream.mime_type.split("/")[1]
filename = f"{stream.title}-{quality}.{extension}"
print(f"{info}Staring download of {filename}")
stream.download(output_path=directory)
if isfile(f"{directory}/{filename}"):
sprint(f"{info2} Downloaded successfully!")
def download(url):
"""
Validate url and show download options
"""
if url is None:
return
youtube = YouTube(url, on_progress_callback=on_progress)
streams = youtube.streams
filtered_streams = filter_streams(streams)
choices = get_choice(filtered_streams)
if len(filtered_streams) == 0:
print(f"{error}NotFoundError: No media matched for config/argument")
exit()
elif len(filtered_streams) == 1:
download_stream(filtered_streams[0])
elif len(filtered_streams) > 1:
chosen = select(
"Choose your option:",
choices=choices
).ask()
if chosen is not None:
index = choices.index(chosen)
download_stream(filtered_streams[index])
else:
print(f"{error}FilterError: There were some errors in filtering")
exit()
def main():
"""
Entrypoint of script
"""
try:
clear(logo=logo)
url = parse_url()
download(url)
except KeyboardInterrupt:
print(f"\n{info2}KeyboardInterrupt: Shutting down due to user interruption")
exit(0)
except Exception as err:
print(err)
if __name__ == "__main__":
main()
# 🎵 ytdlmusic
`ytdlmusic` is a command-line program to search and download music files from YouTube without using a browser.
This package is directly available from [pypi](https://pypi.org/project/ytdlmusic/)
# :warning: Disclaimer
It may be illegal to download restricted content with this software, depending on the law in your country.
This package use two very important dependencies :
- [yt_dlp](https://pypi.org/project/yt_dlp/), a fork from [youtube-dl](https://github.com/ytdl-org/youtube-dl)
- [youtube-search-python](https://pypi.org/project/youtube-search-python/)
# 💫 How to use **ytdlmusic**
`ytdlmusic [KEY WORDS]`

# 💫 How to use **ytdlmusic** in batch mode
You can use a command to loop in a csv file, and download all MP3 files from it.
`ytdlmusic --batch path_file had_header sep columns_to_concatenate`

with the following CSV file (used in the demo's example):
```
song_column;artist_column;unused column
limujii;above;no
nomyn;awake;use
eyazttyzaeyz;zhhezhahkzaj;inexistant
scandinavianz;avalon;information
```
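For the sample file above (header row present, `;` as separator, concatenating the first two columns), the call follows the template; one plausible invocation might be (the exact value syntax is described by `ytdlmusic --help`):
```
ytdlmusic --batch ./my_playlist.csv True ";" 1 2
```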
# 🚀 Other commands and flags
`ytdlmusic`, `ytdlmusic --help` or `ytdlmusic -h` display the help message.
`ytdlmusic --update` or `ytdlmusic -u` upgrades ytdlmusic.
`ytdlmusic --fullupdate` or `ytdlmusic -U` upgrades ytdlmusic and the dependencies yt-dlp and youtube-search-python.
`ytdlmusic --version` or `ytdlmusic -v` displays the versions of ytdlmusic and the dependencies.
You can also add these flags to your commands (except for help and version); a combined example follows the list:
`--auto` or `-y` : Use auto mode: choose the first item for classic use, auto-accept other commands.
`--choices X` or `-N X` : Set the number of choices (default=5, min=1, max=10).
`--k` or `--keep` : Keep the YouTube video title for the filename.
`--t` or `--tag` : Use tags of the downloaded file to rename it.
`--m4a` or `-f` : Use M4A format.
`--ogg` or `-o` : Use OGG format.
`--Q` or `--quality` : Set quality to 320kbps instead of 256kbps for MP3 format.
`--quiet` or `-q` : Give less output.
`--verbose` or `-d` : Give more output.
`--nocolor` or `-n` : Disable colors and emojis in sysout.
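For example, to take the first search result automatically and save it as M4A:
```
ytdlmusic --auto --m4a limujii above
```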
# ⚙️ Install
See [this page](INSTALL.md)
# :question: FAQ
See [this page](FAQ.md)
# :construction_worker: Contribution
## For contributors
Go to [CONTRIBUTING.md](CONTRIBUTING.md). You have to read and accept this [Code of conduct](./CODE_OF_CONDUCT.md).
## Thanks to contributors
Thanks to the contributors and dependency authors:
- [albenquer](https://github.com/albenquer), [dlicois](https://github.com/dlicois) and [Jean-Phi Baconnais](https://github.com/jeanphibaconnais) for contributions !
- [Hitesh Kumar Saini](https://github.com/alexmercerind) for [youtube-search-python](https://github.com/alexmercerind/youtube-search-python)
- [yt-dlp](https://github.com/yt-dlp) for [yt-dlp](https://github.com/yt-dlp/yt-dlp)
- [devsnd](https://github.com/devsnd) for [tinytag](https://github.com/devsnd/tinytag)
- [avian2](https://github.com/avian2) for [unidecode](https://github.com/avian2/unidecode)
- [tartley](https://github.com/tartley) for [colorama](https://github.com/tartley/colorama)
- [vaultboy](https://pypi.org/user/vaultboy) for [termcolor](https://pypi.org/project/termcolor/)
- [Federico Carboni](https://github.com/FedericoCarboni) for [setup-ffmpeg](https://github.com/FedericoCarboni/setup-ffmpeg)
- [pypa](https://github.com/pypa) for [gh-action-pypi-publish](https://github.com/pypa/gh-action-pypi-publish)
- [elgohr](https://github.com/elgohr) for [Github-Release-Action](https://github.com/elgohr/Github-Release-Action)
# :package: Changelog
See [this page](CHANGELOG.md)
# License
MIT License
Copyright (c) 2021 [thib1984](https://github.com/thib1984)
See [this page](LICENSE.txt) for details
import json
import re
from datetime import datetime, timedelta
from itertools import count
from urllib.parse import unquote_plus
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
ExtractorError,
LazyList,
OnDemandPagedList,
float_or_none,
int_or_none,
parse_duration,
parse_iso8601,
traverse_obj,
update_url_query,
)
from ytdlp_plugins.probe import probe_media
__version__ = "2023.06.03"
# pylint: disable=abstract-method
class DTubePluginIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?v/
(?P<id>[0-9a-z.-]+/[\w-]+)
"""
IE_NAME = "d.tube"
GATEWAY_URLS = {
"ipfs": [
"https://player.d.tube/ipfs",
"https://ipfs.d.tube/ipfs",
"https://ipfs.io/ipfs",
],
"btfs": ["https://player.d.tube/btfs", "https://btfs.d.tube/btfs"],
"sia": ["https://siasky.net"],
}
REDIRECT_TEMPLATES = {
"vimeo": "https://vimeo.com/{}",
"twitch": "https://www.twitch.tv/{}",
"youtube": "{}",
"facebook": "https://www.facebook.com/video.php?v={}",
"dailymotion": "https://www.dailymotion.com/video/{}",
}
# pylint: disable=line-too-long
_TESTS = [
{
"url": "https://d.tube/v/truehnchannel3.0/QmNXrihTWmHiLVXxLabfXFakFzJVLq1LimfH2X1sNma5ak",
"md5": "d7b147de74210442e6b14d934f96c585",
"info_dict": {
"id": "truehnchannel3.0/QmNXrihTWmHiLVXxLabfXFakFzJVLq1LimfH2X1sNma5ak",
"title": "HAPPY WEEKEND #TRUEHNERS #TRUEHNER FROM THE TEAM OF TRUE_HUMANNATURE - THE TRUEHN BACKED FASHION & BEAUTY BRAND ",
"description": "md5:452385a9aef03447baa2aae9c801eb14",
"ext": "mp4",
"thumbnail": "https://ipfs.io/ipfs/QmcFBCAx8c8PLew7JfjVmn3VK6MjrSMfCerq5b5y9iNdp8?filename=AAAAAAAAAAA%20TRUEHUMAN%20NATURE.jpg",
"tags": ["truehnchannel"],
"duration": 3,
"uploader_id": "truehnchannel3.0",
"upload_date": "20230603",
"timestamp": 1685782622.873,
},
"params": {
"format": "src",
},
},
{
# using steemit API
"url": "https://d.tube/v/cahlen/hcyx513ospn",
"md5": "fd03f59d2c1f7b1e0ed5a2098116e443",
"info_dict": {
"id": "cahlen/hcyx513ospn",
"title": "Wizard's Life - February 20th, 2022",
"description": "md5:4308b3aac098bf762489eeeea290b8e1",
"ext": "mp4",
"thumbnail": "https://ipfs.cahlen.org/ipfs/"
"QmW9PQUeZAZZ2zryMp5kEVQqpKjJpHNGGUShmojcsW4zQZ",
"tags": ["dtube", "life"],
"duration": 1119,
"uploader_id": "cahlen",
"upload_date": "20220220",
"timestamp": 1645382061.0,
},
"params": {
"format": "src",
},
"expected_warnings": ["Unable to download avalon metadata"],
},
# dailymotion forward
{
"url": "https://d.tube/#!/v/charliesmithmusic11/cup890u4sra",
"info_dict": {
"id": "x86k2uu",
"title": str,
"description": str,
"ext": str,
"uploader": str,
"uploader_id": str,
"upload_date": str,
"timestamp": (int, float),
"extractor": "dailymotion",
},
"params": {
"skip_download": True,
},
},
# YouTube forward
{
"url": "https://d.tube/#!/v/geneeverett33/74w7hgkthww",
"info_dict": {
"id": "rmFZqbh7TaU",
"title": str,
"description": str,
"ext": str,
"uploader": str,
"uploader_id": str,
"upload_date": str,
"extractor": "youtube",
},
"params": {
"skip_download": True,
},
},
]
def formats(self, files):
# pylint: disable=undefined-loop-variable
for provider, default_gateways in self.GATEWAY_URLS.items():
if provider in files and "vid" in files[provider]:
break
else:
return []
gateway = files[provider].get("gw", "").rstrip("/")
if gateway and not re.match(r".*/(?:btfs|ipfs)$", gateway):
gateway = f"{gateway}/{provider}"
if gateway in default_gateways:
default_gateways.remove(gateway)
default_gateways.insert(0, gateway)
loop_gateways = list(default_gateways)
if gateway and gateway not in loop_gateways:
loop_gateways.insert(0, gateway)
formats = []
for format_id, content_id in sorted(files[provider].get("vid", {}).items()):
for gateway in list(loop_gateways):
self.write_debug(f"checking media via gateway {gateway!r}")
media_url = f"{gateway}/{content_id}"
probed_format, *_ = probe_media(self, media_url)
if "filesize" in probed_format:
media_format = {**probed_format, "format_id": format_id}
break
loop_gateways.remove(gateway)
else:
media_format = None
if media_format:
formats.append(media_format)
return formats
@staticmethod
def fallback_files(info):
files = {}
for provider in ("ipfs", "btfs"):
provider_info = info.get(provider, {})
for key, value in provider_info.items():
match = re.match(r"video(\d*)hash", key)
if match is None:
continue
resolution = match.group(1) or "src"
files.setdefault(provider, {}).setdefault("vid", {})[resolution] = value
return files
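# Illustrative mapping (hypothetical hashes): an info dict such as
#   {"ipfs": {"videohash": "QmSrc", "video480hash": "Qm480"}}
# is converted into
#   {"ipfs": {"vid": {"src": "QmSrc", "480": "Qm480"}}}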
def avalon_api(self, endpoint, video_id, **kwargs):
options = dict(
note="Downloading avalon metadata",
errnote="Unable to download avalon metadata",
fatal=False,
)
options.update(kwargs)
result = self._download_json(
f"https://avalon.d.tube/{endpoint}",
video_id,
**options,
)
return result
def steemit_api(self, video_id):
data = {
"id": 0,
"jsonrpc": "2.0",
"method": "call",
"params": ["condenser_api", "get_state", [f"/dtube/@{video_id}"]],
}
result = self._download_json(
"https://api.steemit.com/",
video_id,
headers={"Content-Type": "application/json"},
data=json.dumps(data).encode("utf-8"),
note="Downloading steemit metadata",
)
content = traverse_obj(result, ("result", "content", video_id), default={})
metadata = json.loads(content.get("json_metadata", "{}"))
if not metadata.get("video"):
raise ExtractorError(
"Steemit metadata not available", video_id=video_id, expected=True
)
return {
"_id": video_id,
"author": content.get("author"),
"link": content.get("permlink"),
"json": metadata.get("video", {}),
"ts": (parse_iso8601(content.get("last_update")) or 0) * 1000,
"tags": dict.fromkeys(metadata.get("tags", ()), 0),
}
def entry_from_avalon_result(self, result, from_playlist=False):
video_id = f"{result['author']}/{result['link']}"
info = result["json"]
video_provider = info.get("files", {})
redirect_ies = set(self.REDIRECT_TEMPLATES.keys()) & set(video_provider.keys())
if redirect_ies:
redirect_ie = redirect_ies.pop()
redirect_url = self.REDIRECT_TEMPLATES[redirect_ie].format(
video_provider[redirect_ie]
)
elif "url" in info:
redirect_url = info["url"]
else:
redirect_url = None
if from_playlist or redirect_url:
_type = "url"
formats = None
else:
_type = "video"
formats = self.formats(info.get("files", self.fallback_files(info)))
tags = result.get("tags")
if tags:
tags = list(tags.keys()) if isinstance(tags, dict) else [tags]
else:
tags = []
entry_info = {
"_type": _type,
"url": redirect_url or f"https://d.tube/v/{video_id}",
"id": video_id,
"title": traverse_obj(info, ("title",), ("info", "title")),
"description": traverse_obj(
info, ("desc",), ("description",), ("content", "description")
),
"thumbnail": info.get("thumbnailUrl"),
"tags": tags,
"duration": float_or_none(
traverse_obj(info, ("duration",), ("info", "duration"))
)
or int_or_none(info.get("dur"))
or parse_duration(info.get("dur")),
"timestamp": float_or_none(result.get("ts"), scale=1000),
"uploader_id": result.get("author"),
}
if formats is not None:
entry_info["formats"] = formats
return entry_info
def _real_extract(self, url):
video_id = self._match_id(url)
result = self.avalon_api(f"content/{video_id}", video_id)
if not result:
result = self.steemit_api(video_id)
return self.entry_from_avalon_result(result)
class DTubeUserPluginIE(DTubePluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?c/
(?P<id>[0-9a-z.-]+)
"""
IE_NAME = "d.tube:user"
_TESTS = [
{
"url": "https://d.tube/#!/c/cahlen",
"playlist_mincount": 100, # type: ignore
"info_dict": {
"id": "cahlen",
"title": "cahlen",
},
},
]
def iter_entries(self, user_id, endpoint):
page_size = 50
last_id = None
for page in count(1):
result = self.avalon_api(
f"{endpoint}/{last_id}" if last_id else endpoint,
user_id,
note=f"Downloading page {page}",
)
start_idx = 1 if result and result[0]["_id"] == last_id else 0
for item in result[start_idx:]:
yield self.entry_from_avalon_result(item, from_playlist=True)
if len(result) < page_size:
return
if result:
last_id = result[-1]["_id"]
def _real_extract(self, url):
user_id = self._match_id(url)
endpoint = f"blog/{user_id}"
return self.playlist_result(
LazyList(self.iter_entries(user_id, endpoint)),
playlist_id=user_id,
playlist_title=user_id,
)
class DTubeQueryPluginIE(DTubeUserPluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?
(?P<id>hotvideos|trendingvideos|newvideos)
"""
IE_NAME = "d.tube:query"
_TESTS = [
{
"url": "https://d.tube/#!/hotvideos",
"playlist_mincount": 100, # type: ignore
"info_dict": {
"id": "hotvideos",
"title": "hotvideos",
},
},
{
"url": "https://d.tube/trendingvideos",
"playlist_mincount": 50, # type: ignore
"info_dict": {
"id": "trendingvideos",
"title": "trendingvideos",
},
},
{
"url": "https://d.tube/newvideos",
"playlist_mincount": 50, # type: ignore
"info_dict": {
"id": "newvideos",
"title": "newvideos",
},
},
]
def _real_extract(self, url):
query_id = self._match_id(url)
assert query_id.endswith("videos")
endpoint = query_id[: -len("videos")]
return self.playlist_result(
LazyList(self.iter_entries(query_id, endpoint)),
playlist_id=query_id,
playlist_title=query_id,
)
class DTubeSearchPluginIE(DTubePluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?[st]/
(?P<id>[^?]+)
"""
IE_NAME = "d.tube:search"
_TESTS = [
{
"url": "https://d.tube/#!/s/crypto+currency",
"playlist_mincount": 60, # type: ignore
"info_dict": {
"id": "crypto+currency",
"title": "crypto currency",
},
},
{
"url": "https://d.tube/t/gaming",
"playlist_mincount": 20, # type: ignore
"info_dict": {
"id": "gaming",
"title": "gaming",
},
},
]
def _real_extract(self, url):
page_size = 30
search_term_quoted = self._match_id(url)
search_term = unquote_plus(search_term_quoted)
if "/t/" in url:
# tag search
timestamp = int((datetime.now() - timedelta(weeks=52)).timestamp() * 1e3)
payload = {
"q": f"(NOT pa:*) AND ts:>={timestamp} AND tags:{search_term}",
"sort": "ups:desc",
}
else:
# common search
payload = {"q": f"(NOT pa:*) AND {search_term}"}
def fetch_page(page_number):
offset = page_number * page_size
result = self._download_json(
update_url_query(
"https://search.d.tube/avalon.contents/_search",
{**payload, "size": page_size, "from": offset},
),
search_term,
note=f"Downloading entries from offset {offset:3}",
fatal=False,
)
if not result:
return
for hit in traverse_obj(result, ["hits", "hits"], default=()):
yield self.entry_from_avalon_result(hit["_source"], from_playlist=True)
return self.playlist_result(
OnDemandPagedList(fetch_page, page_size),
playlist_id=search_term_quoted,
playlist_title=search_term,
)
import json
import re
import time
from contextlib import suppress
from shlex import shlex
from urllib.error import HTTPError
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.extractor.peertube import PeerTubeIE
from yt_dlp.utils import (
ExtractorError,
OnDemandPagedList,
base_url,
clean_html,
js_to_json,
parse_duration,
parse_iso8601,
traverse_obj,
urljoin,
)
__version__ = "2023.07.10"
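# JSHLEX splits the comma/whitespace separated argument list of the Nuxt payload.js
# IIFE into tokens and re-quotes every token that is not already a valid JSON literal,
# so that js_to_json() can later substitute them as values. For example, the input
# 'foo,"bar baz",42,null' yields the tokens '"foo"', '"bar baz"', '42' and 'null'.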
class JSHLEX(shlex):
def __init__(self, instream):
super().__init__(
instream=instream, infile=None, posix=True, punctuation_chars=False
)
self.whitespace = ", \t\r\n"
self.whitespace_split = True
def __next__(self):
value = super().__next__()
try:
json.loads(value)
except json.JSONDecodeError:
quote_escaped = value.replace('"', '\\"')
value = f'"{quote_escaped}"'
return value
# pylint: disable=abstract-method
class Auf1IE(InfoExtractor):
IE_NAME = "auf1"
_VALID_URL = r"""(?x)
(?:https?:)?//
(?:www\.)?
(?:auf1\.tv/)
(?P<category>[^/]+/)?
(?P<id>[^/]+)
"""
peertube_extract_url = None
_TESTS = [
{
"url": "https://auf1.tv/nachrichten-auf1/ampelkoalition-eine-abrissbirne-fuer-deutschland/",
"info_dict": {
"id": "rKjpWNnocoARnj4pQMRKXQ",
"title": "Ampelkoalition: Eine Abrissbirne für Deutschland?",
"description": "md5:9265dda76d30e842e1f75aa3cb3e3884",
"ext": "mp4",
"thumbnail": r"re:https://videos\.auf1\.tv/static/thumbnails/[\w-]+\.jpg",
"timestamp": 1638446905,
"upload_date": "20211202",
"uploader": "AUF1.TV",
"uploader_id": "25408",
"duration": 818,
"view_count": int,
"like_count": int,
"dislike_count": int,
"categories": ["News & Politics"],
},
"params": {"skip_download": True},
"expected_warnings": ["JSON API"],
},
{
"url": "https://auf1.tv/nachrichten-auf1/nachrichten-auf1-vom-15-dezember-2022/",
"info_dict": {
"id": "nVBERN4MzFutVzoXsADf8F",
"title": "Nachrichten AUF1 vom 15. Dezember 2022",
"description": "md5:bc4def34dcc8401d84c5127c5f759543",
"ext": "mp4",
"thumbnail": r"re:https://videos\.auf1\.tv/static/thumbnails/[\w-]+\.jpg",
"timestamp": 1671121411,
"upload_date": "20221215",
"uploader": "AUF1.TV",
"uploader_id": "25408",
"duration": 1825,
"view_count": int,
"like_count": int,
"dislike_count": int,
"categories": ["News & Politics"],
},
"params": {"skip_download": True},
"expected_warnings": ["JSON API"],
},
{ # JSON API without payload.js
"url": "https://auf1.tv/stefan-magnet-auf1/"
"heiko-schoening-chaos-und-krieg-gehoeren-leider-zu-deren-plan/",
"info_dict": {
"id": "dVk8Q3VNMLi7b7uhyuSSp6",
"ext": "mp4",
"title": "Heiko Schöning: „Chaos und Krieg gehören leider zu deren Plan“",
"description": "md5:6fb9e7eb469fc544223018a2ff3c998c",
"timestamp": int,
"uploader": str,
"uploader_id": str,
"upload_date": "20220307",
"channel": str,
"channel_url": "contains:/video-channels/auf1.tv",
"duration": 2089,
"view_count": int,
"like_count": int,
"dislike_count": int,
"tags": [],
"categories": ["News & Politics"],
},
"params": {"skip_download": True},
"expected_warnings": [
"Retrying due to too many requests.",
"The read operation timed out",
"JSON API",
],
},
{
# playlist for category
"url": "https://auf1.tv/nachrichten-auf1/",
"info_dict": {
"id": "nachrichten-auf1",
"title": "Nachrichten AUF1",
"description": "md5:dcb992e2bb7fd020a417634b949f2951",
},
"playlist_mincount": 100,
"expected_warnings": [
"Retrying due to too many requests.",
"The read operation timed out",
"JSON API",
],
},
{
# playlist for all videos
"url": "https://auf1.tv/videos",
"info_dict": {
"id": "all_videos",
"title": "AUF1.TV - Alle Videos",
},
"playlist_mincount": 200,
"expected_warnings": [
"Retrying due to too many requests.",
"JSON API",
],
},
]
@staticmethod
def parse_url(url: str):
if not url:
return None
match = re.match(r"^https?://([^/]+)/videos/embed/([^?]+)", url)
# pylint: disable=consider-using-f-string
return "peertube:{}:{}".format(*match.groups()) if match else None
@staticmethod
def sparse_info(metadata):
return {
"id": metadata.get("public_id", "unknown"),
"url": metadata.get("videoUrl"),
"title": metadata.get("title"),
"description": clean_html(traverse_obj(metadata, "text", "preview_text")),
"duration": parse_duration(metadata.get("duration")),
"timestamp": parse_iso8601(metadata.get("published_at") or None),
"thumbnail": metadata.get("thumbnail_url"),
}
def call_api(self, endpoint, video_id=None, fatal=True):
return self._download_json(
f"https://auf1.tv/api/{endpoint}",
video_id=video_id,
fatal=fatal,
errnote="JSON API",
)
def call_with_retries(
self,
operation,
http_error_map=None,
):
http_error_map = http_error_map or {}
max_duration_s = 30.0
sleep_duration_s = 5.0
attempt_count = 0
while True:
start = time.time()
try:
return operation()
except ExtractorError as exc:
attempt_count += 1
errorcode = None
if isinstance(exc.cause, HTTPError):
errorcode = exc.cause.code
sleep_duration_s = float(
exc.cause.headers.get("retry-after", sleep_duration_s)
)
if max_duration_s < 0.0:
self.report_warning(f"Giving up after {attempt_count} attempts.")
elif errorcode == 429:
self.report_warning(
f"Retrying in {sleep_duration_s:.0f} seconds due to too many requests."
)
max_duration_s -= time.time() - start
time.sleep(sleep_duration_s)
continue
for errors, exception in http_error_map.items():
if isinstance(errors, int):
errors = {errors}
if errorcode in errors:
raise exception from exc
raise
def peertube_extract(self, url):
if self.peertube_extract_url is None:
peertube_extractor = self._downloader.get_info_extractor(
PeerTubeIE.ie_key()
)
self.peertube_extract_url = getattr(peertube_extractor, "_real_extract")
return self.call_with_retries(
lambda: self.peertube_extract_url(url),
)
def playlist_from_entries(self, all_videos, **kwargs):
entries = []
for item in all_videos:
public_id = item.get("public_id")
if not public_id:
continue
category = traverse_obj(item, ("show", "public_id"), default="video")
entries.append(
{
"_type": "url",
"ie_key": self.ie_key(),
**self.sparse_info(item),
"url": f"//auf1.tv/{category}/{public_id}/",
}
)
return self.playlist_result(
entries,
**kwargs,
)
def _payloadjs(self, url, page_id):
webpage = self._download_webpage(url, page_id)
payloadjs_url = self._search_regex(
r'href="([^"]+/_?payload.js)"', webpage, "payload url"
)
payloadjs_url = urljoin(base_url(url), payloadjs_url)
payload_js = self._download_webpage(
payloadjs_url, page_id, note="Downloading payload.js"
)
match = re.match(
r"""(?x)
.*
\(function\ *\( (?P<vars>[^)]*) \)
\{\ *return\ * (?P<metadata>\{.+}) .*}
\( (?P<values>.*) \){2}
""",
payload_js,
)
if match is None:
raise ExtractorError("Failed parsing payload.js")
variables, metadata, values = match.groups()
var_mapping = dict(zip(variables.split(","), JSHLEX(values)))
control_character_mapping = dict.fromkeys(range(32))
js_string = js_to_json(metadata, vars=var_mapping).translate(
control_character_mapping
)
return json.loads(js_string)
def _metadata(self, url, *, page_id, method="api"):
if method == "api":
return self.call_with_retries(
lambda: self.call_api(f"getContent/{page_id}", page_id),
http_error_map={500: ExtractorError("JSON API failed (500)")},
)
payload = self._payloadjs(url, page_id)
return payload["data"].popitem()[1]
def _real_extract(self, url):
category, page_id = self._match_valid_url(url).groups()
# single video
if category:
try:
metadata = self._metadata(url, page_id=page_id, method="api")
except ExtractorError as exc:
self.report_warning(exc, page_id)
metadata = self._metadata(url, page_id=page_id, method="payloadjs")
peertube_url = self.parse_url(
traverse_obj(metadata, ("videoUrl",), ("videoUrls", "peertube"))
)
return (
self.peertube_extract(peertube_url)
if peertube_url
else self.sparse_info(metadata)
)
# video playlist
if page_id == "videos":
return self.playlist_from_entries(
self.call_with_retries(
lambda: self.call_api("getVideos", video_id="all_videos"),
),
playlist_id="all_videos",
playlist_title="AUF1.TV - Alle Videos",
)
try:
metadata = self.call_with_retries(
lambda: self.call_api(f"getShow/{page_id}", page_id),
)
except ExtractorError as exc:
self.report_warning(exc, page_id)
metadata = self._metadata(url, page_id=page_id, method="payloadjs")
return self.playlist_from_entries(
metadata.get("contents"),
playlist_id=page_id,
playlist_title=metadata.get("name"),
description=clean_html(metadata.get("description")),
)
# pylint: disable=abstract-method
class Auf1RadioIE(InfoExtractor):
IE_NAME = "auf1:radio"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?
auf1\.radio
(?: /
(?P<category>[^/]+/)?
(?P<id>[^/]+)
)?
/?
"""
_TESTS = [
{
"url": "https://auf1.radio/nachrichten-auf1/worte-der-hoffnung-ein-sammelband-der-mut-macht/",
"md5": "3a0d00dd473f46b387678621420fad8e",
"info_dict": {
"id": "worte-der-hoffnung-ein-sammelband-der-mut-macht",
"ext": "mp3",
"title": "Worte der Hoffnung: Ein Sammelband, der Mut macht",
"description": "md5:3102f277e87e1baafc7f09242b66a071",
"duration": 70,
"timestamp": 1669539605,
"upload_date": "20221127",
"thumbnail": "re:https://auf1.*.jpg",
},
},
{
# playlist for category
"url": "https://auf1.radio/nachrichten-auf1/",
"info_dict": {
"id": "nachrichten-auf1",
"title": "Nachrichten AUF1",
},
"playlist_mincount": 50,
},
{
# playlist for all media
"url": "https://auf1.radio/",
"info_dict": {
"id": "all",
"title": "all",
},
"playlist_mincount": 50,
},
]
MP3_FORMAT = {
"ext": "mp3",
"acodec": "mp3",
"vcodec": "none",
"asr": 48000,
"tbr": 64,
"abr": 64,
"format": "MP2/3 (MPEG audio layer 2/3)",
}
def call_api(self, endpoint, **kwargs):
kwargs.setdefault("errnote", "JSON API")
return self._download_json(
f"https://auf1.radio/api/{endpoint}",
**kwargs,
)
def formats(self, url: str, duration):
format_info = {"url": url}
if url.endswith(".mp3"):
format_info.update(self.MP3_FORMAT)
if duration:
format_info["filesize_approx"] = duration * 8000
return [format_info]
def entry_from_info(self, info, media_id):
return {
"id": info.get("content_public_id", media_id),
"title": info["title"],
"description": info.get("summary"),
"duration": info.get("duration"),
"timestamp": parse_iso8601(info.get("created_at")),
"thumbnail": info.get("thumbnail")
and f"https://auf1.tv/images/{info['thumbnail']}",
"formats": self.formats(info.get("audio_url"), info.get("duration")),
}
def entries_from_playlist(self, playlist_id):
endpoint = "" if playlist_id == "all" else "getShow/"
def fetch(page, _last_page):
page_note = (
f"{page+1}/{_last_page}" if isinstance(_last_page, int) else page + 1
)
return self.call_api(
f"{endpoint}{playlist_id}",
query={"page": page + 1},
video_id=playlist_id,
note=f"Downloading page {page_note}",
)
first_page = fetch(0, None)
last_page = first_page.get("last_page", "NA")
page_size = first_page.get("per_page", len(first_page.get("data", ())))
playlist_title = playlist_id
with suppress(KeyError, IndexError):
if playlist_id != "all":
playlist_title = first_page["data"][0]["show_name"]
def load_page(index):
info = fetch(index, last_page) if index > 0 else first_page
for media_info in info["data"]:
audiofile = media_info.get("audiofile")
if audiofile:
media_info["audio_url"] = f"https://auf1.radio/storage/{audiofile}"
yield self.entry_from_info(media_info, playlist_id)
return self.playlist_result(
entries=OnDemandPagedList(load_page, page_size),
playlist_id=playlist_id,
playlist_title=playlist_title,
playlist_count=first_page.get("total", "NA"),
)
def _real_extract(self, url):
category, page_id = self._match_valid_url(url).groups()
if category:
info = self.call_api(f"get/{page_id}", video_id=page_id)
if not info:
raise ExtractorError("not available")
return self.entry_from_info(info, page_id)
return self.entries_from_playlist(page_id or "all")
import json
import re
from contextlib import suppress
from typing import Callable
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.extractor.youtube import YoutubeIE
from yt_dlp.postprocessor import FFmpegPostProcessor
from yt_dlp.utils import (
ExtractorError,
HEADRequest,
OnDemandPagedList,
UnsupportedError,
clean_html,
determine_ext,
int_or_none,
)
from ytdlp_plugins.utils import ParsedURL
__version__ = "2022.01.02"
EXTERNAL_URL_EXTRACTORS = (YoutubeIE,)
# pylint: disable=abstract-method
class BitTubeIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://(?:www\.)?bittube.tv/
post/
(?P<id>[0-9a-f-]+)
"""
IE_NAME = "bittube"
BASE_URL = "https://bittube.tv/"
_TESTS = [
{
"url": "https://bittube.tv/post/215f2674-6250-4bda-8955-6afe2718cca3",
"md5": "fe6036bee0c4494f39540e65f3eb3ad6",
"info_dict": {
"id": "215f2674-6250-4bda-8955-6afe2718cca3",
"title": "God Doesn't Want Anyone To Perish",
"description": "md5:962173b9525785518eeaf14adf04ec58",
"ext": "mp4",
"is_live": False,
"thumbnail": "contains:newpost/115366/bittube_115366_1640398063933.jpg?token=",
"duration": 25.38,
"uploader": "AnotherVoiceintheDarkness",
"channel": "Asher Brown",
"channel_id": "AnotherVoiceintheDarkness",
"channel_url": "https://bittube.tv/profile/AnotherVoiceintheDarkness",
"timestamp": float,
"upload_date": "20211225",
"view_count": int,
"like_count": int,
},
"params": {},
},
]
def __init__(self, downloader=None):
self._magic_token = None
self.ffmpeg = FFmpegPostProcessor(downloader)
super().__init__(downloader)
def _media_url(self):
js_string = self._download_webpage(
f"{self.BASE_URL}js/main.js",
video_id=None,
note="Downloading media server url",
)
for match_string in re.findall(
r'{(?:\s*[A-Z_]+\s*:\s*"[^"]*"\s*,?)+\s*}', js_string
):
mapping = dict(re.findall(r'(\w+)\s*:\s*"([^"]*)"', match_string))
if "MEDIA_SRV_URL" in mapping:
return f"{mapping['MEDIA_SRV_URL']}/static/posts/"
return None
def _real_initialize(self):
if not self._get_cookies(self.BASE_URL):
self._request_webpage(
HEADRequest(self.BASE_URL),
video_id=None,
note="Setting Cookies",
)
def _call_api(self, endpoint, data, video_id, what=None):
headers = {
"Content-Type": "application/json;charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
}
result = self._download_json(
f"{self.BASE_URL}api/{endpoint}",
video_id,
data=json.dumps(data, separators=(",", ":")).encode(),
headers=headers,
note=f"Downloading {what or 'JSON metadata'}",
)
with suppress(KeyError, TypeError):
if result["success"] is False:
raise ExtractorError(f"{endpoint}: {result['mssg']}")
return result
@property
def magic_token(self):
if self._magic_token is None:
self._magic_token = self._call_api(
"generate-magic-token", {}, None, what="magic token"
)
return self._magic_token
def media_url(self, src):
if re.match(r"[a-z]+://", src):
return src
return src and (
f"https://webseed1.bittube.tv/mediaServer/static/posts/"
f"{src}?token={self.magic_token}"
)
def ffprobe_format(self, media_url):
# Invoking ffprobe to determine resolution
self.to_screen("Checking format with ffprobe")
timeout = self.get_param("socket_timeout")
timeout = int(timeout * 1e6) if timeout else 2000000
metadata = self.ffmpeg.get_metadata_object(
media_url, opts=("-timeout", str(timeout))
)
if not metadata:
return {}
v_stream = {}
a_stream = {}
for stream in metadata["streams"]:
if not v_stream and stream["codec_type"] == "video":
v_stream.update(stream)
elif not a_stream and stream["codec_type"] == "audio":
a_stream.update(stream)
extension_map = {
"matroska": "mkv",
"hls": "mp4",
"mp4": "mp4",
"jpeg_pipe": "jpg",
}
extensions = metadata["format"]["format_name"].split(",")
for ext in extensions:
if ext in extension_map:
extension = extension_map[ext]
break
else:
extension = extensions[0]
fps = None
if "r_frame_rate" in v_stream:
match = re.match(r"(\d+)(?:/(\d+))?", v_stream["r_frame_rate"])
if match:
nom, den = match.groups()
fps = round(int(nom) / int(den or 1))
return {
"url": media_url,
"ext": extension,
"container": extension,
"vcodec": v_stream.get("codec_name", "none"),
"acodec": a_stream.get("codec_name", "none"),
"fps": fps,
"asr": int_or_none(a_stream.get("sample_rate")),
"tbr": int_or_none(metadata["format"].get("bit_rate"), scale=1000),
"vbr": int_or_none(v_stream.get("bit_rate"), scale=1000),
"abr": int_or_none(a_stream.get("bit_rate"), scale=1000),
"height": int_or_none(v_stream.get("height")),
"width": int_or_none(v_stream.get("width")),
"filesize": int_or_none(metadata["format"].get("size")),
}
def formats(self, info, details=True):
url = info.pop("url")
if not url:
info["formats"] = []
return
ext = determine_ext(url, default_ext="unknown_video")
format_info = {"url": url, "ext": ext.lower()}
if details and self.ffmpeg.probe_available:
format_info.update(self.ffprobe_format(url))
elif ext == "m3u8":
format_info["ext"] = "mp4"
elif details and ext not in {"jpg", "gif", "png"}:
response = self._request_webpage(
HEADRequest(url),
video_id=None,
note="Checking media url",
errnote="Media error",
fatal=False,
)
if response:
if ext == "unknown_video":
format_info["ext"] = response.headers["Content-Type"].split("/")[-1]
format_info["filesize"] = int_or_none(
response.headers.get("Content-Length")
)
info["formats"] = [format_info]
def entry_from_result(self, result, from_playlist=False):
url = None
is_live = False
_type = "video"
timestamp = result.get("post_time")
duration_mins = result.get("mediaDuration")
if result["streamactive"]:
if "streamchannel" in result and "streamfeed" in result:
url = self._call_api(
"livestream/obtaintokenurl",
{"channel": result["streamchannel"], "feed": result["streamfeed"]},
result.get("post_id"),
what="token url",
).get("url")
is_live = bool(url)
else:
_type = "url"
url = f"{self.BASE_URL}post/{result.get('post_id')}"
url = url or self.media_url(result.get("imgSrc"))
# check for external forward urls
if result.get("mediaType") == "external":
for extractor in EXTERNAL_URL_EXTRACTORS:
# pylint: disable=protected-access
if extractor._match_valid_url(url):
_type = "url"
break
entry_info = {
"_type": _type,
"id": result.get("post_id"),
"title": re.sub(r"\s+", " ", clean_html(result["title"])).strip(),
"description": clean_html(result.get("description"))
or clean_html(result.get("mediaDescr")),
"url": url,
"is_live": is_live,
"thumbnail": self.media_url(result.get("thumbSrc")),
"duration": duration_mins and result.get("mediaDuration") * 60,
"uploader": result.get("username"),
"channel": result.get("fullname"),
"channel_id": result.get("username"),
"channel_url": f"{self.BASE_URL}profile/{result.get('username')}",
"timestamp": timestamp and timestamp * 1e-3,
"view_count": result.get("views"),
"like_count": result.get("likes_count"),
"media_type": result.get("mediaType"),
}
if entry_info["_type"] == "video":
self.formats(
entry_info, details=not from_playlist or self.get_param("listformats")
)
return entry_info
def _real_extract(self, url):
video_id = self._match_id(url)
result = self._call_api("get-post", {"post_id": video_id}, video_id)
return self.entry_from_result(result)
# pylint: disable=abstract-method
class BitTubeUserIE(BitTubeIE):
_VALID_URL = r"""(?x)
                    https?://(?:www\.)?bittube\.tv/
profile/
(?P<id>\w+)
"""
IE_NAME = "bittube:user"
_TESTS = [
{
# all videos from channel playlist
"url": "https://bittube.tv/profile/AnotherVoiceintheDarkness",
"info_dict": {
"id": "AnotherVoiceintheDarkness",
"title": "Asher Brown",
"description": "An anonymous messenger trying to show people the truth about the "
"world they live in.",
},
"playlist_mincount": 30, # type: ignore
},
]
def _paged_entries(
self, endpoint: str, page_size: int, gen_query: Callable[[int, int], dict]
):
def fetch_page(page_number):
offset = page_number * page_size
query = gen_query(page_size, offset)
result = self._call_api(
endpoint,
query,
endpoint,
what=f"entries from offset {offset:3}",
)
for key in ("items", "posts"):
items = result.get(key)
if isinstance(items, list):
break
else:
items = []
for item in items:
yield self.entry_from_result(item, from_playlist=True)
return OnDemandPagedList(fetch_page, page_size)
def _real_extract(self, url):
page_size = 30
username = self._match_id(url)
details = self._call_api("get-user-details", {"username": username}, username)[
"details"
]
def gen_query(limit, offset):
return {"user": details["id"], "limit": limit, "offset": offset}
return self.playlist_result(
self._paged_entries("get-user-posts", page_size, gen_query),
playlist_id=username,
playlist_title=details.get("fullname"),
playlist_description=details.get("bio"),
)
# pylint: disable=abstract-method
class BitTubeQueryIE(BitTubeUserIE):
_VALID_URL = r"""(?x)
                    https?://(?:www\.)?bittube\.tv/
(?:recommended|explore|topics?)(?:$|[/?])
"""
IE_NAME = "bittube:query"
_TESTS = [
{
"url": "https://bittube.tv/recommended",
"playlist_mincount": 30,
"info_dict": {
"id": "recommended",
"title": "recommended",
},
},
{
"url": "https://bittube.tv/topic/catsoftube",
"playlist_mincount": 20,
"info_dict": {
"id": "topic/catsoftube",
"title": "topic/catsoftube",
},
},
]
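    # _real_extract() maps the path prefix onto the matching API endpoint:
    # recommended -> get-recommended-posts, explore -> get-media-to-explore,
    # topic/<tag> -> get-hashtag-posts, topics -> get-posts-subscribed-topics.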
def _real_extract(self, url):
page_size = 30
parsed_url = ParsedURL(
url,
regex=r"""(?x)
.*
(?:
(?:/
(?:explore-)?(?P<type>video|livestream|image|audio)s
) | (?:/topic/(?P<topic>\w+))
)
""",
)
term = parsed_url.query("term", default="")
navigation = parsed_url.query("navigation", default="New")
media_type = parsed_url.match("type")
playlist_id = parsed_url.path.lstrip("/")
if playlist_id.startswith("recommended"):
endpoint = "get-recommended-posts"
def gen_query(limit, offset):
return {
"from": offset,
"size": limit,
"sort": navigation,
"what": media_type or "all",
"term": term,
"explore": False,
}
elif playlist_id.startswith("explore"):
endpoint = "get-media-to-explore"
def gen_query(limit, offset):
return {
"type": media_type or "all",
"limit": limit,
"offset": offset,
"term": term,
"sort": navigation,
"newfirst": None,
}
elif playlist_id.startswith("topic/"):
topic = parsed_url.match("topic")
endpoint = "get-hashtag-posts"
def gen_query(limit, offset):
return {
"hashtag": topic,
"from": offset,
"size": limit,
"sort": navigation,
}
elif playlist_id.startswith("topics"):
endpoint = "get-posts-subscribed-topics"
def gen_query(limit, offset):
return {
"from": offset,
"size": limit,
"sort": navigation,
"what": media_type or "all",
}
else:
raise UnsupportedError(url)
return self.playlist_result(
self._paged_entries(endpoint, page_size, gen_query),
playlist_id=playlist_id,
playlist_title=playlist_id,
) | ytdlp-bittube | /ytdlp_bittube-2022.1.2-py3-none-any.whl/ytdlp_plugins/extractor/bittube.py | bittube.py |
import re
from contextlib import suppress
from sys import maxsize
from urllib.parse import parse_qs, parse_qsl, urlencode, urlparse, urlunparse
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
ExtractorError,
OnDemandPagedList,
UnsupportedError,
clean_html,
get_element_by_id,
int_or_none,
parse_duration,
parse_iso8601,
traverse_obj,
update_url_query,
urljoin,
)
from ytdlp_plugins.probe import headprobe_media
__version__ = "2022.12.17"
# pylint: disable=abstract-method
class BrighteonIE(InfoExtractor):
IE_NAME = "brighteon"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?
(?:brighteon\.com/)
(?:(?P<taxonomy>browse|channels|categories|watch)/)?
                    (?P<id>[a-zA-Z0-9-]+)
"""
_EMBED_URL_RE = (
re.compile(
r"""(?x)
<iframe[^>]+src="
(?P<url>https?://(?:[a-z][\da-z]+\.)?
brighteon\.com/embed/[\da-zA-Z-]+)
[^"]*"
"""
),
)
_BASE_URL = "https://www.brighteon.com"
_MPEG_TS = True
_TESTS = [
{
"url": "https://www.brighteon.com/4f2586ec-66ac-4db7-ac72-efb5f0473406",
"md5": "9a6a3ce5c3391eccb71f995f530209d5",
"info_dict": {
"id": "4f2586ec-66ac-4db7-ac72-efb5f0473406",
"title": "10/26/2022 Let's Talk America: Dr. Alan Keyes ft. Dennis Pyle",
"ext": "mp3",
"description": 'Watch "Let\'s Talk America" Live on Brighteon.tv '
"every weekday from 2:00 pm - 3:00 pm estSupport "
"Let's Talk America by visiting or donating at https://iamtv.us/",
"timestamp": 1666814967,
"upload_date": "20221026",
"duration": 3033.0,
"channel": "BrighteonTV",
"channel_id": "123538c1-de87-46d0-a0ad-be8efebbfaa1",
"channel_url": "https://www.brighteon.com/channels/brighteontv",
"tags": [
"current events",
"bible",
"declaration of independence",
"scripture",
"american politics",
"constitutional rights",
"conservative patriot",
"lets talk america",
"dr alan keyes",
],
"thumbnail": "re:https?://[a-z]+.brighteon.com/(?:[a-z-]+/)+[a-f0-9-]+",
"view_count": int,
"like_count": int,
},
"params": {"check_formats": False, "format": "audio"},
},
{
# playlist
"url": "https://www.brighteon.com/watch/21824dea-3564-40af-a972-d014b987261b",
"info_dict": {
"id": "21824dea-3564-40af-a972-d014b987261b",
"title": "U.S. Senate Impeachment Trial",
},
"playlist_mincount": 10,
},
{
# channel
"url": "https://www.brighteon.com/channels/brighteontv",
"info_dict": {
"id": "123538c1-de87-46d0-a0ad-be8efebbfaa1",
"title": "BrighteonTV",
},
"playlist_mincount": 50,
},
{
# categories
"url": "https://www.brighteon.com/categories/4ad59df9-25ce-424d-8ac4-4f92d58322b9/videos",
"info_dict": {
"id": "4ad59df9-25ce-424d-8ac4-4f92d58322b9",
"title": "Health & Medicine",
},
"playlist_mincount": 50,
},
{
# browse
"url": "https://www.brighteon.com/browse/new-videos",
"info_dict": {
"id": "new-videos",
"title": "new-videos",
},
"playlist_mincount": 50,
},
{
# test embedded urls
"url": "https://sonsoflibertymedia.com/one-thing-that-the-officials-forgot-to-tell-you-about-the-vaccines-the-truth-video/",
"info_dict": {
"id": "one-thing-that-the-officials-forgot-to-tell-you-about-the-vaccines-the-truth-video",
"title": 'One Thing That The "Officials" Forgot To Tell You About The Vaccines, The Truth (Video) \u00bb Sons of Liberty Media',
"description": "md5:128ed10e870c40216b0d26bb227029f4",
"thumbnail": "https://sonsoflibertymedia.com/wp-content/uploads/2021/04/liars.jpg",
"age_limit": 0,
},
"params": {"playlistitems": "1"},
"playlist": [
{
"md5": "53e2dea64b626eb057bf01bb4a1d1f4a",
"info_dict": {
"id": "d0263fb5-45cc-4ef7-af6b-57b5484abf93",
"title": "The Same Ones Selling You The Virus, Are The Same Ones Selling You The Vaccines - Just Watch Them!",
"ext": "mp4",
"description": str,
"timestamp": int,
"upload_date": str,
},
}
],
},
]
@staticmethod
def page_props_path(suffix=None):
path = ["props", "initialProps", "pageProps"]
if suffix:
path.extend(suffix.split("."))
return path
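    # _json_api() maps a regular site URL onto Brighteon's JSON backend by
    # prefixing the path with /api-v3 while keeping the query string, e.g.
    # (illustrative) https://www.brighteon.com/channels/brighteontv
    #   -> https://www.brighteon.com/api-v3/channels/brighteontv
    # with small path tweaks for channel "/videos" and category listings.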
def _json_api(self, url, video_id, **kwargs):
parsed_url = urlparse(url)
parsed_qs = parse_qs(parsed_url.query)
path = parsed_url.path.rstrip("/")
if path.startswith("/channels/") and path.endswith("/videos"):
path = path.replace("/videos", "/")
if path.startswith("/categories/") and not path.endswith("/videos"):
path = path + "/videos"
# noinspection PyProtectedMember
json_api_url = urlunparse(
parsed_url._replace(path="/api-v3" + path, query=urlencode(parsed_qs, True))
)
json_obj = self._download_json(json_api_url, video_id=video_id, **kwargs)
return json_obj
def _json_extract(self, url, video_id, note=None):
webpage = self._download_webpage(url, video_id=video_id, note=note)
try:
return self._parse_json(
get_element_by_id("__NEXT_DATA__", webpage), video_id=video_id
)
except TypeError as exc:
raise ExtractorError(
"Could not extract JSON metadata", video_id=video_id
) from exc
@staticmethod
def _rename_formats(formats, prefix):
for item in formats:
if "vcodec" in item and item["vcodec"] == "none":
language = item.get("language")
suffix = f"audio-{language}" if language else "audio"
else:
suffix = (
f'{item["height"]}p' if item.get("height") else item["format_id"]
)
item["format_id"] = f"{prefix}-{suffix}"
def _auto_merge_formats(self, formats):
requested_format = self.get_param("format")
audio_only = [
fmt["format_id"] for fmt in formats if fmt.get("vcodec") == "none"
]
video_only = {
fmt["format_id"] for fmt in formats if fmt.get("acodec") == "none"
}
if self._downloader and len(audio_only) == 1 and requested_format in video_only:
requested_format = f"{requested_format}+{audio_only[0]}"
self.to_screen(
f"Adding audio stream {audio_only[0]!r} to video only format"
)
self._downloader.format_selector = self._downloader.build_format_selector(
requested_format
)
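    # _download_formats(): for HLS manifests an extra set of "mpeg-*" formats is
    # synthesized by pointing at the .ts rendition directly (gated by the
    # _MPEG_TS class flag, which the TV/Radio subclasses below turn off).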
def _download_formats(self, sources, video_id):
formats = []
if not sources:
return formats
for source in sources:
try:
url = source["src"]
typ = source.get("type", url[-3:])
except KeyError:
continue
if url.endswith(".m3u8"):
media_formats = self._extract_m3u8_formats(
url, video_id=video_id, fatal=False
)
self._rename_formats(media_formats, "hls")
if self._MPEG_TS:
mpg_formats = []
for fmt in media_formats:
mpg_fmt = {
key: value
for key, value in fmt.items()
if key not in {"url", "manifest_url", "protocol"}
}
mpg_fmt["url"] = fmt["url"].replace(".m3u8", ".ts")
mpg_formats.append(mpg_fmt)
self._rename_formats(mpg_formats, "mpeg")
media_formats.extend(mpg_formats)
elif url.endswith(".mpd"):
media_formats = self._extract_mpd_formats(
url, video_id=video_id, fatal=False
)
self._rename_formats(media_formats, "dash")
for fmt in media_formats:
fmt["manifest_stream_number"] = 0
else:
media_formats = ()
self.report_warning(f"unknown media format {typ!r}")
formats.extend(media_formats)
for fmt in formats:
fps = fmt.get("fps")
fmt["fps"] = fps and round(fps)
return formats
def _update_formats(self, formats):
for fmt in formats:
if fmt.get("height"):
fmt["fps"] = 30 if fmt["height"] >= 540 else 15
if self.get_param("check_formats") is False or not (
fmt.get("format_note", "").startswith("DASH video")
):
continue
info = headprobe_media(self, fmt["url"])[0]
fmt.update(info)
def _entry_from_info(self, video_info, channel_info, from_playlist=False):
video_id = video_info["id"]
url = f"{self._BASE_URL}/{video_id}"
duration = parse_duration(video_info.get("duration"))
if from_playlist:
_type = "url"
formats = None
else:
_type = "video"
formats = self._download_formats(
video_info.get("source", ()), video_id=video_id
)
if video_info.get("audio"):
formats.append(
{
"format_id": "audio",
"url": video_info["audio"],
"vcodec": "none",
"acodec": "mp4a.40.2",
"tbr": 192, # estimation for filesize_approx
"asr": 48000,
}
)
self._update_formats(formats)
self._auto_merge_formats(formats)
# merge channel_info items into video_info
for item in ("name", "id", "shortUrl"):
channel_item = channel_info.get(item)
if channel_item:
ci_name = f"channel{item[0].upper()}{item[1:]}"
video_info[ci_name] = channel_item
entry_info = {
"_type": _type,
"url": url,
"id": video_id,
"title": video_info.get("name"),
"description": clean_html(video_info.get("description")),
"timestamp": parse_iso8601(video_info.get("createdAt")),
"duration": duration,
"channel": video_info.get("channelName"),
"channel_id": video_info.get("channelId"),
"channel_url": video_info.get("channelShortUrl")
and f'{self._BASE_URL}/channels/{video_info["channelShortUrl"]}',
"tags": video_info.get("tags", []),
"thumbnail": video_info.get("thumbnail"),
"view_count": traverse_obj(
video_info, ("analytics", "videoView"), default=None
),
"like_count": int_or_none(video_info.get("likes")),
}
if formats is not None:
entry_info["formats"] = formats
return entry_info
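    # _paged_url_entries() wraps the paginated listing in an OnDemandPagedList
    # with a page size of 1: fetch_entry() is asked for one entry at a time and
    # maps that index onto the site's real page size, caching only one page.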
def _paged_url_entries(self, page_id, url, start_page=None, use_json_api=True):
max_pages = None
def load_page(page_number):
page_url = update_url_query(url, {"page": page_number})
note = f"Downloading page {page_number}"
if max_pages:
note = f"{note}/{max_pages}"
if use_json_api:
return self._json_api(page_url, video_id=page_id, note=note)
json_obj = self._json_extract(page_url, video_id=page_id, note=note)
page_props = traverse_obj(json_obj, self.page_props_path(), default={})
return page_props.get("data") or page_props
data = load_page(start_page or "1")
channel_info = data.get("channel", {})
initial_video_list = data.get("videos")
if initial_video_list is None:
raise UnsupportedError(url)
page_cache = {1: initial_video_list}
page_size = len(initial_video_list)
pagination = data.get("pagination", data)
max_pages = pagination.get("pages", maxsize)
def fetch_entry(index):
page_idx, offset = divmod(index, page_size)
page_number = page_idx + 1
if (
start_page is None
and page_number not in page_cache
and page_number <= max_pages
):
video_list = load_page(page_number).get("videos", ())
page_cache.clear() # since we only need one entry
page_cache[page_number] = video_list
else:
video_list = page_cache.get(page_number, ())
with suppress(IndexError):
yield self._entry_from_info(
video_list[offset], channel_info, from_playlist=True
)
playlist_info = channel_info or data
return self.playlist_result(
entries=OnDemandPagedList(fetch_entry, 1),
playlist_id=playlist_info.get("id", page_id),
playlist_title=playlist_info.get("name", page_id),
playlist_count=page_size if start_page else pagination.get("count", "N/A"),
)
def _playlist_entries(self, playlist_info, url):
entries = []
for idx, video in enumerate(playlist_info.get("videosInPlaylist", ()), 1):
entries.append(
{
"_type": "url",
"url": update_url_query(url, {"index": idx}),
"title": video.get("videoName"),
"duration": parse_duration(video.get("duration")),
}
)
return self.playlist_result(
entries=entries,
playlist_id=playlist_info.get("playlistId"),
playlist_title=playlist_info.get("playlistName"),
playlist_count=len(entries),
)
def _real_extract(self, url: str):
match = self._match_valid_url(url)
taxonomy, video_id = match.groups()
parsed_url = dict(parse_qsl(urlparse(url).query))
self._set_cookie("brighteon.com", "adBlockClosed", "1")
if taxonomy in {"channels", "categories", "browse"}:
return self._paged_url_entries(
video_id,
url,
start_page=parsed_url.get("page"),
use_json_api=taxonomy != "browse",
)
json_obj = self._json_extract(url, video_id=video_id)
page_props = traverse_obj(json_obj, self.page_props_path(), default={})
playlist_info = page_props.get("playlist", {})
if playlist_info and parsed_url.get("index") is None:
return self._playlist_entries(playlist_info, url)
video_info = page_props.get("video", {})
channel_info = page_props.get("channel", {})
if video_info:
return self._entry_from_info(video_info, channel_info)
raise UnsupportedError(url)
class BrighteonTvIE(BrighteonIE):
IE_NAME = "brighteontv"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?
brighteon\.tv/?
"""
_BASE_URL = "https://www.brighteon.tv"
_MPEG_TS = False
_TESTS = [
{
"url": "https://www.brighteon.tv/LiveTV/",
"info_dict": {
"id": "brighteontv-daily-show",
"ext": "mp4",
"title": "startswith:Brighteon.TV Daily Show",
"description": str,
"channel_id": "8c536b2f-e9a1-4e4c-a422-3867d0e472e4",
"tags": [
"Brighteon.TV",
"Video",
"Live",
"Streaming",
"Shows",
"Events",
"Documentaries",
],
"is_live": True,
},
"params": {"skip_download": True},
},
{
"url": "https://www.brighteon.tv/LiveEspanol/",
"info_dict": {
"id": "brighteontv-espanol",
"ext": "mp4",
"title": "startswith:Brighteon.TV Espanol",
"description": str,
"channel_id": "8c536b2f-e9a1-4e4c-a422-3867d0e472e4",
"tags": [
"Brighteon.TV",
"Video",
"Live",
"Streaming",
"Shows",
"Events",
"Documentaries",
],
"is_live": True,
},
"params": {"skip_download": True},
},
{
"url": "https://www.brighteon.tv/LiveEvents/",
"info_dict": {
"id": "brighteon2-show",
"ext": "mp4",
"title": "startswith:Brighteon.TV Daily Show",
"description": str,
"channel_id": "8c536b2f-e9a1-4e4c-a422-3867d0e472e4",
"tags": [
"Brighteon.TV",
"Video",
"Live",
"Streaming",
"Shows",
"Events",
"Documentaries",
],
"is_live": True,
},
"params": {"skip_download": True},
},
]
def _real_extract(self, url):
video_id = self.ie_key()
webpage = self._download_webpage(url, video_id=video_id)
description = self._og_search_description(webpage)
tags = self._html_search_meta("keywords", webpage, default="")
stream_url = self._search_regex(
r'<iframe[^>]+src="(https?://[\w./-]+)"', webpage, "stream_url", fatal=False
)
if stream_url is None:
raise UnsupportedError(url)
json_obj = self._json_extract(stream_url, video_id=video_id)
stream_info = traverse_obj(json_obj, self.page_props_path("stream"))
video_info = self._entry_from_info(stream_info, {})
video_info.update(
dict(description=description, tags=tags.split(", "), is_live=True)
)
return video_info
class BrighteonRadioIE(BrighteonIE):
IE_NAME = "brighteonradio"
_VALID_URL = r"(?P<base>https?://(?:www\.)?brighteonradio\.com)/?"
_BASE_URL = "https://www.brighteonradio.com"
_MPEG_TS = False
_TESTS = [
{
"url": "https://www.brighteonradio.com/",
"info_dict": {
"id": "BrighteonRadio",
"ext": "mp4",
"title": "startswith:Brighteon Radio",
"description": "Free Speech Audio Streaming for Humanity",
"tags": ["Brighteon", "Radio", "News", "Audio", "Streaming"],
"is_live": True,
},
"params": {"skip_download": True},
},
]
def _real_extract(self, url):
video_id = self.ie_key()
webpage = self._download_webpage(url, video_id=video_id)
player_url = self._search_regex(
r'<script[^>]+src="([^"]+/Player\w*.js)"', webpage, "player_url"
)
player_js = self._download_webpage(
urljoin(self._BASE_URL, player_url),
note="Downloading JS player",
video_id=video_id,
)
stream_url = self._search_regex(
r"^\s*var\s+[^'\"/]+['\"](https?://[^'\"]+/index\.m3u8)['\"]",
player_js,
"stream_url",
flags=re.MULTILINE,
)
formats = self._extract_m3u8_formats(stream_url, video_id)
for fmt in formats:
fmt["height"] = fmt["width"] = None
fmt["vcodec"] = "none"
ffmpeg_args = self.get_param("external_downloader_args")
if isinstance(ffmpeg_args, dict):
ffmpeg_args.setdefault("ffmpeg_o", []).append("-vn")
tags = [
tag.strip()
for tag in self._html_search_meta("keywords", webpage, default="").split(
","
)
]
return {
"id": video_id,
"title": self._og_search_title(webpage, default="Brighteon Radio"),
"description": self._og_search_description(webpage),
"tags": tags,
"is_live": True,
"formats": formats,
} | ytdlp-brighteon | /ytdlp_brighteon-2022.12.17-py3-none-any.whl/ytdlp_plugins/extractor/brighteon.py | brighteon.py |
import json
import re
from datetime import datetime, timedelta
from itertools import count
from urllib.parse import unquote_plus
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
ExtractorError,
LazyList,
OnDemandPagedList,
float_or_none,
int_or_none,
parse_duration,
parse_iso8601,
traverse_obj,
update_url_query,
)
from ytdlp_plugins.probe import probe_media
__version__ = "2023.06.03"
# pylint: disable=abstract-method
class DTubePluginIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?v/
(?P<id>[0-9a-z.-]+/[\w-]+)
"""
IE_NAME = "d.tube"
GATEWAY_URLS = {
"ipfs": [
"https://player.d.tube/ipfs",
"https://ipfs.d.tube/ipfs",
"https://ipfs.io/ipfs",
],
"btfs": ["https://player.d.tube/btfs", "https://btfs.d.tube/btfs"],
"sia": ["https://siasky.net"],
}
REDIRECT_TEMPLATES = {
"vimeo": "https://vimeo.com/{}",
"twitch": "https://www.twitch.tv/{}",
"youtube": "{}",
"facebook": "https://www.facebook.com/video.php?v={}",
"dailymotion": "https://www.dailymotion.com/video/{}",
}
# pylint: disable=line-too-long
_TESTS = [
{
"url": "https://d.tube/v/truehnchannel3.0/QmNXrihTWmHiLVXxLabfXFakFzJVLq1LimfH2X1sNma5ak",
"md5": "d7b147de74210442e6b14d934f96c585",
"info_dict": {
"id": "truehnchannel3.0/QmNXrihTWmHiLVXxLabfXFakFzJVLq1LimfH2X1sNma5ak",
"title": "HAPPY WEEKEND #TRUEHNERS #TRUEHNER FROM THE TEAM OF TRUE_HUMANNATURE - THE TRUEHN BACKED FASHION & BEAUTY BRAND ",
"description": "md5:452385a9aef03447baa2aae9c801eb14",
"ext": "mp4",
"thumbnail": "https://ipfs.io/ipfs/QmcFBCAx8c8PLew7JfjVmn3VK6MjrSMfCerq5b5y9iNdp8?filename=AAAAAAAAAAA%20TRUEHUMAN%20NATURE.jpg",
"tags": ["truehnchannel"],
"duration": 3,
"uploader_id": "truehnchannel3.0",
"upload_date": "20230603",
"timestamp": 1685782622.873,
},
"params": {
"format": "src",
},
},
{
# using steemit API
"url": "https://d.tube/v/cahlen/hcyx513ospn",
"md5": "fd03f59d2c1f7b1e0ed5a2098116e443",
"info_dict": {
"id": "cahlen/hcyx513ospn",
"title": "Wizard's Life - February 20th, 2022",
"description": "md5:4308b3aac098bf762489eeeea290b8e1",
"ext": "mp4",
"thumbnail": "https://ipfs.cahlen.org/ipfs/"
"QmW9PQUeZAZZ2zryMp5kEVQqpKjJpHNGGUShmojcsW4zQZ",
"tags": ["dtube", "life"],
"duration": 1119,
"uploader_id": "cahlen",
"upload_date": "20220220",
"timestamp": 1645382061.0,
},
"params": {
"format": "src",
},
"expected_warnings": ["Unable to download avalon metadata"],
},
# dailymotion forward
{
"url": "https://d.tube/#!/v/charliesmithmusic11/cup890u4sra",
"info_dict": {
"id": "x86k2uu",
"title": str,
"description": str,
"ext": str,
"uploader": str,
"uploader_id": str,
"upload_date": str,
"timestamp": (int, float),
"extractor": "dailymotion",
},
"params": {
"skip_download": True,
},
},
# YouTube forward
{
"url": "https://d.tube/#!/v/geneeverett33/74w7hgkthww",
"info_dict": {
"id": "rmFZqbh7TaU",
"title": str,
"description": str,
"ext": str,
"uploader": str,
"uploader_id": str,
"upload_date": str,
"extractor": "youtube",
},
"params": {
"skip_download": True,
},
},
]
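    # formats() picks the first provider (ipfs/btfs/sia) that has a "vid"
    # mapping, then probes each resolution through the post's own gateway first
    # and the default gateways as fallback until one answers with a filesize.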
def formats(self, files):
# pylint: disable=undefined-loop-variable
for provider, default_gateways in self.GATEWAY_URLS.items():
if provider in files and "vid" in files[provider]:
break
else:
return []
gateway = files[provider].get("gw", "").rstrip("/")
if gateway and not re.match(r".*/(?:btfs|ipfs)$", gateway):
gateway = f"{gateway}/{provider}"
if gateway in default_gateways:
default_gateways.remove(gateway)
default_gateways.insert(0, gateway)
loop_gateways = list(default_gateways)
if gateway and gateway not in loop_gateways:
loop_gateways.insert(0, gateway)
formats = []
for format_id, content_id in sorted(files[provider].get("vid", {}).items()):
for gateway in list(loop_gateways):
self.write_debug(f"checking media via gateway {gateway!r}")
media_url = f"{gateway}/{content_id}"
probed_format, *_ = probe_media(self, media_url)
if "filesize" in probed_format:
media_format = {**probed_format, "format_id": format_id}
break
loop_gateways.remove(gateway)
else:
media_format = None
if media_format:
formats.append(media_format)
return formats
@staticmethod
def fallback_files(info):
files = {}
for provider in ("ipfs", "btfs"):
provider_info = info.get(provider, {})
for key, value in provider_info.items():
match = re.match(r"video(\d*)hash", key)
if match is None:
continue
resolution = match.group(1) or "src"
files.setdefault(provider, {}).setdefault("vid", {})[resolution] = value
return files
def avalon_api(self, endpoint, video_id, **kwargs):
options = dict(
note="Downloading avalon metadata",
errnote="Unable to download avalon metadata",
fatal=False,
)
options.update(kwargs)
result = self._download_json(
f"https://avalon.d.tube/{endpoint}",
video_id,
**options,
)
return result
def steemit_api(self, video_id):
data = {
"id": 0,
"jsonrpc": "2.0",
"method": "call",
"params": ["condenser_api", "get_state", [f"/dtube/@{video_id}"]],
}
result = self._download_json(
"https://api.steemit.com/",
video_id,
headers={"Content-Type": "application/json"},
data=json.dumps(data).encode("utf-8"),
note="Downloading steemit metadata",
)
content = traverse_obj(result, ("result", "content", video_id), default={})
metadata = json.loads(content.get("json_metadata", "{}"))
if not metadata.get("video"):
raise ExtractorError(
"Steemit metadata not availabe", video_id=video_id, expected=True
)
return {
"_id": video_id,
"author": content.get("author"),
"link": content.get("permlink"),
"json": metadata.get("video", {}),
"ts": (parse_iso8601(content.get("last_update")) or 0) * 1000,
"tags": dict.fromkeys(metadata.get("tags", ()), 0),
}
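    # entry_from_avalon_result(): posts that merely embed an external platform
    # (YouTube, Vimeo, Twitch, Facebook, Dailymotion) become "url" entries that
    # yt-dlp re-dispatches; only native ipfs/btfs/sia uploads are probed into
    # concrete formats here.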
def entry_from_avalon_result(self, result, from_playlist=False):
video_id = f"{result['author']}/{result['link']}"
info = result["json"]
video_provider = info.get("files", {})
redirect_ies = set(self.REDIRECT_TEMPLATES.keys()) & set(video_provider.keys())
if redirect_ies:
redirect_ie = redirect_ies.pop()
redirect_url = self.REDIRECT_TEMPLATES[redirect_ie].format(
video_provider[redirect_ie]
)
elif "url" in info:
redirect_url = info["url"]
else:
redirect_url = None
if from_playlist or redirect_url:
_type = "url"
formats = None
else:
_type = "video"
formats = self.formats(info.get("files", self.fallback_files(info)))
tags = result.get("tags")
if tags:
tags = list(tags.keys()) if isinstance(tags, dict) else [tags]
else:
tags = []
entry_info = {
"_type": _type,
"url": redirect_url or f"https://d.tube/v/{video_id}",
"id": video_id,
"title": traverse_obj(info, ("title",), ("info", "title")),
"description": traverse_obj(
info, ("desc",), ("description",), ("content", "description")
),
"thumbnail": info.get("thumbnailUrl"),
"tags": tags,
"duration": float_or_none(
traverse_obj(info, ("duration",), ("info", "duration"))
)
or int_or_none(info.get("dur"))
or parse_duration(info.get("dur")),
"timestamp": float_or_none(result.get("ts"), scale=1000),
"uploader_id": result.get("author"),
}
if formats is not None:
entry_info["formats"] = formats
return entry_info
def _real_extract(self, url):
video_id = self._match_id(url)
result = self.avalon_api(f"content/{video_id}", video_id)
if not result:
result = self.steemit_api(video_id)
return self.entry_from_avalon_result(result)
class DTubeUserPluginIE(DTubePluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?c/
(?P<id>[0-9a-z.-]+)
"""
IE_NAME = "d.tube:user"
_TESTS = [
{
"url": "https://d.tube/#!/c/cahlen",
"playlist_mincount": 100, # type: ignore
"info_dict": {
"id": "cahlen",
"title": "cahlen",
},
},
]
def iter_entries(self, user_id, endpoint):
page_size = 50
last_id = None
for page in count(1):
result = self.avalon_api(
f"{endpoint}/{last_id}" if last_id else endpoint,
user_id,
note=f"Downloading page {page}",
)
            if not result:
                return
            start_idx = 1 if result[0]["_id"] == last_id else 0
            for item in result[start_idx:]:
                yield self.entry_from_avalon_result(item, from_playlist=True)
            if len(result) < page_size:
                return
            last_id = result[-1]["_id"]
def _real_extract(self, url):
user_id = self._match_id(url)
endpoint = f"blog/{user_id}"
return self.playlist_result(
LazyList(self.iter_entries(user_id, endpoint)),
playlist_id=user_id,
playlist_title=user_id,
)
class DTubeQueryPluginIE(DTubeUserPluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?
(?P<id>hotvideos|trendingvideos|newvideos)
"""
IE_NAME = "d.tube:query"
_TESTS = [
{
"url": "https://d.tube/#!/hotvideos",
"playlist_mincount": 100, # type: ignore
"info_dict": {
"id": "hotvideos",
"title": "hotvideos",
},
},
{
"url": "https://d.tube/trendingvideos",
"playlist_mincount": 50, # type: ignore
"info_dict": {
"id": "trendingvideos",
"title": "trendingvideos",
},
},
{
"url": "https://d.tube/newvideos",
"playlist_mincount": 50, # type: ignore
"info_dict": {
"id": "newvideos",
"title": "newvideos",
},
},
]
def _real_extract(self, url):
query_id = self._match_id(url)
assert query_id.endswith("videos")
endpoint = query_id[: -len("videos")]
return self.playlist_result(
LazyList(self.iter_entries(query_id, endpoint)),
playlist_id=query_id,
playlist_title=query_id,
)
class DTubeSearchPluginIE(DTubePluginIE):
_VALID_URL = r"""(?x)
https?://(?:www\.)?d\.tube/
(?:\#!/)?[st]/
(?P<id>[^?]+)
"""
IE_NAME = "d.tube:search"
_TESTS = [
{
"url": "https://d.tube/#!/s/crypto+currency",
"playlist_mincount": 60, # type: ignore
"info_dict": {
"id": "crypto+currency",
"title": "crypto currency",
},
},
{
"url": "https://d.tube/t/gaming",
"playlist_mincount": 20, # type: ignore
"info_dict": {
"id": "gaming",
"title": "gaming",
},
},
]
def _real_extract(self, url):
page_size = 30
search_term_quoted = self._match_id(url)
search_term = unquote_plus(search_term_quoted)
if "/t/" in url:
# tag search
timestamp = int((datetime.now() - timedelta(weeks=52)).timestamp() * 1e3)
payload = {
"q": f"(NOT pa:*) AND ts:>={timestamp} AND tags:{search_term}",
"sort": "ups:desc",
}
else:
# common search
payload = {"q": f"(NOT pa:*) AND {search_term}"}
def fetch_page(page_number):
offset = page_number * page_size
result = self._download_json(
update_url_query(
"https://search.d.tube/avalon.contents/_search",
{**payload, "size": page_size, "from": offset},
),
search_term,
note=f"Downloading entries from offset {offset:3}",
fatal=False,
)
if not result:
return
for hit in traverse_obj(result, ["hits", "hits"], default=()):
yield self.entry_from_avalon_result(hit["_source"], from_playlist=True)
return self.playlist_result(
OnDemandPagedList(fetch_page, page_size),
playlist_id=search_term_quoted,
playlist_title=search_term,
) | ytdlp-dtube | /ytdlp_dtube-2023.6.3-py3-none-any.whl/yt_dlp_plugins/extractor/dtube.py | dtube.py |
import warnings
from contextlib import ContextDecorator, ExitStack, contextmanager, suppress
from inspect import getmodule, stack
from typing import Any, Callable, Dict, Optional, cast
from unittest.mock import patch
try:
from typing import Protocol # pylint: disable=ungrouped-imports
except ImportError:
from typing_extensions import Protocol # type: ignore
import yt_dlp
from . import GLOBALS, __version__
from .utils import tabify, write_json_file
# mypy typing stub
# pylint: disable=too-few-public-methods
class Function(Protocol):
__globals__: Dict[str, Any]
__call__: Callable
class InverseDecorated(Protocol):
__original__: Callable
__call__: Callable
def monkey_patch(orig):
def decorator(func: Callable) -> InverseDecorated:
def decorated(*args, **kwargs):
return func(*args, **kwargs)
setattr(decorated, "__original__", orig)
return cast(InverseDecorated, decorated)
return decorator
def calling_plugin_class():
plugins = {str(cls) for cls in GLOBALS.FOUND.values()}
for frame_info in stack():
extractor_class = frame_info[0].f_locals.get("ie")
if str(extractor_class) in plugins:
return extractor_class
return None
@monkey_patch(yt_dlp.YoutubeDL.print_debug_header)
def plugin_debug_header(self):
plugin_list = []
for name, cls in GLOBALS.FOUND.items():
module = getmodule(cls)
version = getattr(cls, "__version__", None) or getattr(
module, "__version__", None
)
version = f"(v{version})" if version else ""
cls_path = f"{module.__name__}.{name}" if module else name
alt_name = getattr(cls, "IE_NAME", name)
plugin_list.append((f"[{alt_name}]", f"via {cls_path!r}", version))
if plugin_list:
plural_s = "s" if len(plugin_list) > 1 else ""
self.write_debug(
f"ytdlp-plugins (v{__version__}) loaded {len(plugin_list)} plugin{plural_s} "
"which are not part of yt-dlp. Use at your own risk."
)
for line in tabify(sorted(plugin_list), join_string=" "):
self.write_debug(" " + line)
else:
self.write_debug(f"ytdlp-plugins version {__version__}")
if GLOBALS.OVERRIDDEN:
self.write_debug("Overridden classes due to name collisions:")
items = [
(f"{name!r}", f"from {cls.__module__!r}")
for name, cls in GLOBALS.OVERRIDDEN.items()
]
for line in tabify(items):
self.write_debug(" " + line)
return plugin_debug_header.__original__(self)
@monkey_patch(yt_dlp.utils.bug_reports_message)
def bug_reports_message(*args, **kwargs):
cls = calling_plugin_class()
if cls is None:
return bug_reports_message.__original__(*args, **kwargs)
with suppress(AttributeError):
return "; " + cls().IE_BUG_REPORT
return ""
# pylint: disable=invalid-name
class patch_function_globals(ContextDecorator):
"""
    context manager that replaces an object in the global namespace of a given function
"""
def __init__(
self, func: Function, global_object: Any, *, global_name: Optional[str] = None
):
self.obj = global_object
self.globals = func.__globals__
name = global_object.__name__ if global_name is None else global_name
self.name = name if name in self.globals else None
if self.name is None:
warnings.warn(
f"Unable to replace {name!r} in globals for {func}. "
f"Context manager will have no effect.",
RuntimeWarning,
stacklevel=2,
)
def switch_object(self):
if self.name is None:
return
self.globals[self.name], self.obj = self.obj, self.globals[self.name]
def __enter__(self):
self.switch_object()
return self
def __exit__(self, *exc):
self.switch_object()
return False
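# SKIP_VT_MODE below swaps the no-op stub in for yt_dlp's windows_enable_vt_mode
# inside YoutubeDL.__init__. Illustrative use, assuming the installed yt-dlp
# still calls windows_enable_vt_mode() during __init__:
#     with SKIP_VT_MODE:
#         ydl = yt_dlp.YoutubeDL(params)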
def windows_enable_vt_mode():
"""dummy stub to supress subprocess warnings"""
SKIP_VT_MODE = patch_function_globals(yt_dlp.YoutubeDL.__init__, windows_enable_vt_mode)
# pylint: disable=protected-access
_PATCHES = (
patch("yt_dlp.utils.bug_reports_message", bug_reports_message),
patch.object(yt_dlp.YoutubeDL, "print_debug_header", plugin_debug_header),
patch_function_globals(yt_dlp.YoutubeDL._write_info_json, write_json_file),
)
def patch_decorator(func):
for _patch in reversed(_PATCHES):
func = _patch(func)
return func
@contextmanager
def patch_context():
_stack = ExitStack()
try:
yield [_stack.enter_context(ctx) for ctx in _PATCHES]
finally:
_stack.close() | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/patching.py | patching.py |
import re
from typing import Any, Dict
from yt_dlp.postprocessor import FFmpegPostProcessor
from yt_dlp.utils import (
HEADRequest,
YoutubeDLError,
determine_ext,
int_or_none,
traverse_obj,
)
# pylint: disable=too-few-public-methods
class GLOBALS:
FFMPEG = FFmpegPostProcessor()
LAST_METADATA: Dict[str, Any] = {}
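# codec_name() converts ffprobe codec/profile/level data into the dotted codec
# tags yt-dlp uses, e.g. (illustrative) H.264 High profile, level 4.0 becomes
# "avc1.640028"; if any part cannot be resolved the plain codec name is kept.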
def codec_name(info):
formats = {"h264": "avc1", "aac": "mp4a", "mpeg4": "mp4v"}
profiles = {
"Simple Profile": (0x14, 0, 3),
"Baseline": (0x42, 0, 0),
"Constrained Baseline": (0x42, 0x40, 0),
"LC": (0x28, 0, 2),
"HE-AAC": (0x28, 0, 5),
"Main": (0x4D, 0x40, 0),
"High": (0x64, 0, 0),
}
cname = info.get("codec_name", "none")
fmt = formats.get(cname, cname)
profile_name = info.get("profile", "???")
match = re.match(r"Profile\s+(\d+)", profile_name)
if match:
profile = int(match.group(1))
level = None
constraint = 0
else:
profile, constraint, level = profiles.get(profile_name, ("???", 0, 0))
level = info.get("level", 0) or level
if level and level < 0:
level = "???"
if fmt == "avc1" and isinstance(level, int) and isinstance(profile, int):
tag = f"{fmt}.{profile:02x}{constraint:02x}{level:02x}"
else:
tag = f"{fmt}.{profile}.{level}"
return cname if "?" in tag else tag
def determine_bitrate(info):
for path in ("tags/variant_bitrate", "bit_rate"):
bitrate = int_or_none(
traverse_obj(info, path.split("/"), expected_type=str), scale=1000
)
if bitrate:
break
return bitrate
def parse_streams(metadata):
v_stream = {}
a_stream = {}
stream_index = []
def fps():
if "r_frame_rate" in v_stream:
match = re.match(r"(\d+)(?:/(\d+))?", v_stream["r_frame_rate"])
if match:
nom, den = match.groups()
return round(int(nom) / int(den or 1))
return None
for stream in sorted(
metadata["streams"],
reverse=True,
key=lambda item: (item.get("height", 0), determine_bitrate(item) or 0),
):
if not v_stream and stream["codec_type"] == "video":
v_stream.update(stream)
stream_index.append(stream["index"])
elif not a_stream and stream["codec_type"] == "audio":
a_stream.update(stream)
stream_index.append(stream["index"])
extension_map = {
"matroska": "webm",
"asf": "wmv",
"hls": "mp4" if v_stream else "m4a",
"dash": "mp4" if v_stream else "m4a",
"mp4": "mp4" if v_stream else None,
"m4a": "m4a",
"mpegts": "ts",
"mpeg": "mpg",
"jpeg": "jpg",
}
extensions = metadata["format"]["format_name"].replace("_pipe", "").split(",")
for ext in extensions:
candidate = extension_map.get(ext)
if candidate:
extension = candidate
break
else:
extension = extensions[0]
abr = determine_bitrate(a_stream)
vbr = determine_bitrate(v_stream)
tbr = (
determine_bitrate(metadata["format"])
or (abr and vbr and abr + vbr)
or (not v_stream and abr)
or (not a_stream and vbr)
or None
)
return {
"ext": extension,
"acodec": codec_name(a_stream),
"vcodec": codec_name(v_stream),
"asr": int_or_none(a_stream.get("sample_rate")),
"fps": fps(),
"tbr": tbr,
"vbr": vbr,
"abr": abr,
"height": int_or_none(v_stream.get("height")),
"width": int_or_none(v_stream.get("width")),
"filesize": int_or_none(metadata["format"].get("size"))
if metadata["format"].get("format_name") not in {"hls", "dash"}
else None,
"format": metadata["format"].get("format_long_name"),
"duration": int(float(metadata["format"].get("duration", 0.0))) or None,
"_stream_index": stream_index,
}
def ffprobe_media(self, media_url, options=(), timeout_s=2.0, **kwargs):
# Invoking ffprobe to determine resolution
self.to_screen(kwargs.get("note") or "Checking media with ffprobe")
    timeout_s = self.get_param("socket_timeout") or timeout_s
metadata = GLOBALS.FFMPEG.get_metadata_object(
media_url,
opts=(
"-show_error",
"-show_programs",
"-fflags",
"+ignidx",
"-timeout",
            str(int(timeout_s * 1e6)),
*options,
),
)
GLOBALS.LAST_METADATA = metadata
err_cause = traverse_obj(metadata, ("error", "string"))
if err_cause:
err_msg = ": ".join((kwargs.get("errnote", "ffprobe error"), err_cause))
if kwargs.get("fatal"):
raise YoutubeDLError(err_msg)
self.report_warning(err_msg)
return []
ffprobe_formats = []
if len(metadata.get("programs", ())) > 1:
for program in metadata["programs"]:
ffprobe_formats.append(
{
"url": media_url,
"protocol": "http_dash_segments",
"fragments": [],
"manifest_url": media_url,
"manifest_stream_number": f"p:{program['program_id']}",
**parse_streams(
{
"streams": program["streams"],
"format": {**metadata["format"], **program},
}
),
}
)
elif (
len(metadata.get("programs", ())) == 1
and metadata["format"].get("nb_streams", 0) > 2
and metadata["format"].get("format_name") == "dash"
):
for stream in metadata["programs"][0].get("streams", ()):
ffprobe_formats.append(
{
"url": media_url,
"protocol": "http_dash_segments",
"fragments": [],
"manifest_url": media_url,
"manifest_stream_number": stream["index"],
**parse_streams(
{
"streams": [stream],
"format": metadata["format"],
}
),
}
)
else:
ffprobe_formats.append({"url": media_url, **parse_streams(metadata)})
return ffprobe_formats
def headprobe_media(self, media_url, **kwargs):
# pylint: disable=protected-access
options = dict(
video_id=None,
note="Checking media url",
errnote="Media error",
fatal=False,
)
options.update(kwargs)
response = self._request_webpage(
HEADRequest(media_url),
**options,
)
format_info = {
"url": media_url,
"ext": determine_ext(media_url, default_ext="unknown_video"),
}
if response:
ctype = response.headers["Content-Type"]
match = re.match(
"^(?:audio|video|image)/(?:[a-z]+[-.])*([a-zA-Z1-9]{2,})(?:$|;)", ctype
)
if match and format_info["ext"] == "unknown_video":
format_info["ext"] = match.group(1).lower()
format_info["filesize"] = int_or_none(response.headers.get("Content-Length"))
return [format_info]
def probe_media(self, media_url, failfast=False, **kwargs):
if GLOBALS.FFMPEG.probe_available:
probed_formats = ffprobe_media(self, media_url, **kwargs)
if probed_formats or failfast:
return probed_formats
return headprobe_media(self, media_url, **kwargs) | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/probe.py | probe.py |
import ast
import builtins
from contextlib import suppress
from inspect import getmro, getsourcefile, getsourcelines
from typing import Any, Dict, List, Tuple, Union
from .utils import unlazify
AstSequences = (ast.List, ast.Tuple)
AstSequenceType = Union[ast.List, ast.Tuple]
_CACHE: Dict[type, Tuple[str, List[Dict[str, Any]]]] = {}
AST_TYPE_MAP = {
ast.Constant: lambda obj: obj.value,
ast.NameConstant: lambda obj: obj.value,
ast.Str: lambda obj: obj.s,
ast.Num: lambda obj: obj.n,
type(None): lambda obj: obj, # for type completion
}
def dict_info(node: ast.Dict) -> Dict[str, Any]:
line_info = {"_self": node.lineno}
info = {"_lineno": line_info}
for key, value in zip(node.keys, node.values):
key_cls, value_cls = type(key), type(value)
key_value = AST_TYPE_MAP[key_cls](key) if key_cls in AST_TYPE_MAP else key
if value_cls in AST_TYPE_MAP:
actual_value = AST_TYPE_MAP[value_cls](value)
elif isinstance(value, ast.Dict):
actual_value = dict_info(value)
elif isinstance(value, AstSequences):
actual_value = list_info(value)
elif isinstance(value, ast.Name):
actual_value = getattr(builtins, value.id, value.id)
else:
actual_value = value
line_info[key_value] = value.lineno
info[key_value] = actual_value
return info
def list_info(node: AstSequenceType) -> List[Dict[str, Any]]:
data = []
for child in ast.iter_child_nodes(node):
if not isinstance(child, ast.Dict):
continue
info = dict_info(child)
data.append(info)
return data
def find_assignment(node, name_predicate):
for child in ast.iter_child_nodes(node):
with suppress(AssertionError):
assert isinstance(child, ast.Assign)
left_expr = child.targets[0]
assert isinstance(left_expr, ast.Name)
name = left_expr.id
assert name_predicate(name)
return child.value
return None
def get_line_infos(test_cls: type) -> Tuple[str, List[Dict[str, Any]]]:
test_attributes = {"_TESTS", "_TEST"}
cls = unlazify(test_cls)
for cls in getmro(cls):
if not test_attributes & set(cls.__dict__.keys()):
continue
source_lines, line_number = getsourcelines(cls)
ast_obj = ast.parse("".join(source_lines))
ast.increment_lineno(ast_obj, n=line_number - 1)
test_node = find_assignment(
ast_obj.body[0], lambda name: name in test_attributes
)
break
else:
test_node = None
line_number = 0
source_file = str(getsourcefile(cls))
if isinstance(test_node, AstSequences):
data = list_info(test_node)
elif isinstance(test_node, ast.Dict):
data = [dict_info(test_node)]
else:
data = [{"_file": source_file, "_lineno": {"_self": line_number}}]
return source_file, data
def get_test_lineno(cls: type, index: int) -> Dict[str, Any]:
if cls in _CACHE:
source_filename, line_infos = _CACHE[cls]
else:
source_filename, line_infos = get_line_infos(cls)
_CACHE[cls] = source_filename, line_infos
if index >= len(line_infos):
index = len(line_infos) - 1
info = line_infos[index]
info["_file"] = source_filename
return info | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/ast_utils.py | ast_utils.py |
import hashlib
import json
import re
from contextlib import suppress
from importlib import import_module
from itertools import cycle
from pathlib import Path
from typing import Dict, Optional, Union
from urllib.parse import parse_qsl, urlparse
def estimate_filesize(formats, duration):
if not (formats and duration):
return
for item in formats:
if any(map(item.get, ("filesize", "filesize_approx", "fs_approx"))):
continue
tbr = item.get("tbr")
if tbr:
item["filesize_approx"] = 128 * tbr * duration
def unlazify(cls: type) -> type:
"""if extractor class is lazy type, return the actual class"""
with suppress(AttributeError, ImportError):
actual_module = getattr(cls, "_module")
module = import_module(actual_module)
cls = getattr(module, cls.__name__)
return cls
def tabify(items, join_string=" ", alignment="<"):
tabs = tuple(map(lambda x: max(len(str(s)) for s in x), zip(*items)))
for item in items:
aligning = cycle(alignment)
yield join_string.join(
f"{part!s:{align}{width}}"
for part, width, align in zip(item, tabs, aligning)
)
def write_json_file(obj, file):
with open(file, "w", encoding="utf-8") as fd:
json.dump(obj, fd, indent=4)
def md5(data: Union[Path, str]) -> str:
if isinstance(data, Path):
return hashlib.md5(data.read_bytes()).hexdigest()
return hashlib.md5(data.encode("utf-8")).hexdigest()
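# Illustrative use of ParsedURL (names and URL are made up):
#   parsed = ParsedURL("https://example.com/watch?v=42", regex=r".*/(?P<page>\w+)")
#   parsed.query("v") == "42"; parsed.match("page") == "watch"; parsed.netloc == "example.com"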
class ParsedURL:
"""
This class provides a unified interface for urlparse(),
parse_qsl() and regular expression groups
"""
def __init__(self, url: str, *, regex: Optional[str] = None):
self._parts = parts = urlparse(url)
self._query: Dict[str, str] = dict(parse_qsl(parts.query))
self._match = re.match(regex, url) if regex else None
def __getattr__(self, item):
"""
forward the attributes from urlparse.ParsedResult
thus providing scheme, netloc, url, params, fragment
note that .query is shadowed by a different method
"""
return getattr(self._parts, item)
def query(self, key=None, default=None):
if key is None:
return dict(self._query)
return self._query.get(key, default)
def match(self, key=None):
if self._match is None:
return None
if key is None:
return self._match.groupdict()
return self._match.group(key) | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/utils.py | utils.py |
import operator
import os
import re
import sys
import warnings
from contextlib import suppress
from inspect import getfile
from pathlib import Path
from typing import Any, Dict
from unittest import TestCase
from yt_dlp.extractor import gen_extractor_classes
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import preferredencoding, write_string
from ytdlp_plugins import GLOBALS, add_plugins
from .utils import md5, unlazify
DEFAULT_PARAMS = {
"allsubtitles": False,
"check_formats": False,
"consoletitle": False,
"continuedl": True,
"fixup": "never",
"force_write_download_archive": False,
"forcedescription": False,
"forcefilename": False,
"forceformat": False,
"forcethumbnail": False,
"forcetitle": False,
"forceurl": False,
"format": "best",
"ignoreerrors": False,
"listformats": None,
"listsubtitles": False,
"logtostderr": False,
"matchtitle": None,
"max_downloads": None,
"nocheckcertificate": True,
"nopart": False,
"noprogress": False,
"outtmpl": "%(id)s.%(ext)s",
"overwrites": None,
"password": None,
"playliststart": 1,
"prefer_free_formats": False,
"quiet": False,
"ratelimit": None,
"rejecttitle": None,
"retries": 10,
"simulate": False,
"socket_timeout": 20,
"subtitlesformat": "best",
"subtitleslang": None,
"test": True,
"updatetime": True,
"usenetrc": False,
"username": None,
"verbose": True,
"writeannotations": False,
"writedescription": False,
"writedesktoplink": False,
"writeinfojson": True,
"writelink": False,
"writesubtitles": False,
"writeurllink": False,
"writewebloclink": False,
}
def get_params(override=None):
parameters = dict(DEFAULT_PARAMS)
if override:
parameters.update(override)
return parameters
def report_warning(message):
"""
    Print the message to stderr; it will be prefixed with 'WARNING:'.
    If stderr is a tty, the 'WARNING:' prefix will be colored.
"""
if sys.stderr.isatty() and os.name != "nt":
_msg_header = "\033[0;33mWARNING:\033[0m"
else:
_msg_header = "WARNING:"
output = f"{_msg_header} {message}\n"
if "b" in getattr(sys.stderr, "mode", "") or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
def get_class_testcases(cls):
cls = unlazify(cls)
if not issubclass(cls, InfoExtractor):
return
for key in ("_TEST", "_TESTS"):
if key in cls.__dict__:
break
else:
return
test_cases = cls.__dict__[key]
if isinstance(test_cases, dict):
test_cases = [test_cases]
if not isinstance(test_cases, (list, tuple)):
if test_cases is not None:
warnings.warn(f"{cls}: _TEST is {type(test_cases)}", UserWarning)
return
for test_case in test_cases:
test_case["name"] = cls.__name__[:-2]
test_case["cls"] = cls
yield test_case
def get_testcases():
add_plugins()
project_plugins = Path.cwd() / "ytdlp_plugins"
if "--all" in sys.argv or "test_all" in os.environ:
test_classes = gen_extractor_classes()
filter_local = False
else:
test_classes = GLOBALS.FOUND.values()
filter_local = "--extern" not in sys.argv
for cls in test_classes:
module_file = Path(getfile(cls))
if (
filter_local
and project_plugins.is_dir()
and project_plugins != module_file.parents[1]
):
continue
yield from get_class_testcases(cls)
class DownloadTestcase(TestCase):
def assert_field_is_valid(self, expr: bool, field: str, msg: str) -> None:
if not expr:
msg = self._formatMessage(msg, f"Mismatch in field {field!r}")
raise self.failureException(msg)
def assert_field_is_present(self, expr: Any, *fields: str) -> None:
if isinstance(expr, dict):
fields = tuple(field for field in fields if field not in expr)
expr = not fields
if not expr:
fields_str = ", ".join((repr(field) for field in fields))
plural_s = "s" if len(fields) > 1 else ""
msg = f"Missing field{plural_s} {fields_str}"
raise self.failureException(msg)
def expect_value(self, got, expected, field):
check_types = False
with suppress(AssertionError, TypeError):
assert isinstance(expected, tuple)
_expected = []
for item in expected:
if item is None:
_expected.append(type(None))
else:
assert isinstance(item, type)
_expected.append(item)
expected = tuple(_expected)
check_types = True
if isinstance(expected, str):
self.expect_string(got, expected, field)
elif isinstance(expected, type) or check_types:
self.assert_field_is_valid(
isinstance(got, expected),
field,
f"expected type {expected!r}, "
f"but got value {got!r} of type {type(got)!r}",
)
elif isinstance(expected, dict) and isinstance(got, dict):
self.expect_dict(got, expected)
elif isinstance(expected, list) and isinstance(got, list):
self.assert_field_is_valid(
len(expected) == len(got),
field,
f"expected a list of length {len(expected):d}, "
f"but got a list of length {len(got):d} for field {field}",
)
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
type_got = type(item_got)
type_expected = type(item_expected)
field_name = f"{field}[{index}]"
self.assert_field_is_valid(
type_expected == type_got,
field_name,
f"expected type {type_expected!r}, got {type_got!r}",
)
self.expect_value(item_got, item_expected, field_name)
else:
self.expect_field(got, expected, field)
def expect_field(self, got: Any, expected: Any, field: str):
self.assert_field_is_valid(
expected == got,
field,
f"expected {expected!r}, got {got!r}",
)
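    # expect_string() implements the small expectation DSL used in _TESTS:
    # "re:<regex>", "startswith:<prefix>", "contains:<substring>", "md5:<hash>",
    # "count:<n>" / "mincount:<n>" / "maxcount:<n>"; anything else is compared
    # literally (long strings via assertEqual with a shortened message).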
def expect_string(self, got: Any, expected: str, field: str):
if expected.startswith("re:"):
match_str = expected[len("re:") :]
match_rex = re.compile(match_str)
self.assert_field_is_valid(
isinstance(got, str),
field,
f"expected a {str.__name__} object, " f"but got {type(got).__name__}",
)
self.assert_field_is_valid(
bool(match_rex.match(got)),
field,
f"{got!r} does not match regex r'{match_str}'",
)
elif expected.startswith("startswith:"):
start_str = expected[len("startswith:") :]
self.assert_field_is_valid(
isinstance(got, str),
field,
f"expected a {str.__name__} object, " f"but got {type(got).__name__}",
)
self.assert_field_is_valid(
got.startswith(start_str),
field,
f"{got!r} does not start with {start_str!r}",
)
elif expected.startswith("contains:"):
contains_str = expected[len("contains:") :]
self.assert_field_is_valid(
isinstance(got, str),
field,
f"expected a {str.__name__} object, " f"but got {type(got).__name__}",
)
self.assert_field_is_valid(
contains_str in got,
field,
f"{got!r} does not contain {contains_str!r}",
)
elif expected.startswith("md5:"):
self.assert_field_is_valid(
isinstance(got, str),
field,
f"expected a string object, "
f"but got value {got!r} of type {type(got)!r}",
)
self.expect_field("md5:" + md5(got), expected, field)
elif re.match(r"^(?:min|max)?count:\d+", expected):
self.assert_field_is_valid(
isinstance(got, (list, dict)),
field,
f"expected a list or a dict, "
f"but value is of type {type(got).__name__}",
)
operation, expected_num = expected.split(":")
expected_int = int(expected_num)
if operation == "mincount":
assert_func = operator.ge
msg_tmpl = "expected at least {} items, but only got {}"
elif operation == "maxcount":
assert_func = operator.le
msg_tmpl = "expected not more than {} items, but got {}"
elif operation == "count":
assert_func = operator.eq
msg_tmpl = "expected exactly {} items, but got {}"
else:
raise Exception("Should not happen")
self.assert_field_is_valid(
assert_func(len(got), expected_int),
field,
msg_tmpl.format(expected_int, len(got)),
)
elif len(expected) <= 16:
self.expect_field(got, expected, field)
else:
exp_short = expected
max_len = 64
if len(exp_short) > max_len:
exp_short = "".join((expected[: max_len - 5], "[...]"))
self.assertEqual(
expected, got, f"Mismatch in field {field!r}, expected {exp_short!r}"
)
def expect_dict(self, got_dict, expected_dict: Dict[str, Any]):
self.assertIsInstance(got_dict, dict)
for info_field, expected in expected_dict.items():
got = got_dict.get(info_field)
self.expect_value(got, expected, info_field)
def expect_info_dict(self, got_dict, expected_dict):
self.expect_dict(got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get("_type") not in ("playlist", "multi_video"):
mandatory_fields = ["id", "title"]
if expected_dict.get("ext"):
mandatory_fields.append("ext")
self.assert_field_is_present(got_dict, *mandatory_fields)
# Check for mandatory fields that are automatically set by YoutubeDL
self.assert_field_is_present(
got_dict, *("webpage_url", "extractor", "extractor_key")
)
# Are check-able fields missing from the test case definition?
test_info_dict = dict(
(
key,
value
if not isinstance(value, str) or len(value) < 250
else "md5:" + md5(value),
)
for key, value in got_dict.items()
if value
and key
in (
"id",
"title",
"description",
"uploader",
"upload_date",
"timestamp",
"uploader_id",
"location",
"age_limit",
)
)
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if not missing_keys:
return
info_dict_str = ""
if len(missing_keys) != len(expected_dict):
info_dict_str += "".join(
f" {k!r}: {v!r},\n"
for k, v in test_info_dict.items()
if k not in missing_keys
)
if info_dict_str:
info_dict_str += "\n"
info_dict_str += "".join(
f" {k!r}: {test_info_dict[k]!r},\n" for k in missing_keys
)
write_string("\n'info_dict': {\n" + info_dict_str + "},\n", out=sys.stderr)
self.assert_field_is_present(not missing_keys, *missing_keys)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(msg):
if not any(re.search(w_re, msg) for w_re in warnings_re):
real_warning(msg)
ydl.report_warning = _report_warning | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/_helper.py | _helper.py |
import importlib
import re
import sys
import traceback
from contextlib import suppress
from importlib.abc import Loader, MetaPathFinder
from importlib.machinery import ModuleSpec, PathFinder
from importlib.util import find_spec, module_from_spec
from inspect import getmembers, isclass
from itertools import accumulate
from pathlib import Path
from pkgutil import iter_modules
from typing import Dict
from zipfile import ZipFile
from zipimport import zipimporter
from yt_dlp.extractor.common import InfoExtractor
__version__ = "2023.01.11"
PACKAGE_NAME = __name__
class GLOBALS:
_INITIALIZED = False
FOUND: Dict[str, InfoExtractor] = {}
OVERRIDDEN: Dict[str, InfoExtractor] = {}
SUBPACKAGES = (f"{PACKAGE_NAME}.extractor", f"{PACKAGE_NAME}.postprocessor")
@classmethod
def initialize(cls):
if not cls._INITIALIZED:
sys.meta_path.insert(
0,
PluginFinder(*cls.SUBPACKAGES),
)
cls._INITIALIZED = True
@classmethod
def reset(cls):
# update sys.path_importer_cache
importlib.invalidate_caches()
for package in cls.SUBPACKAGES:
if package in sys.modules:
del sys.modules[package]
PathFinder.find_spec(package)
# pylint: disable=abstract-method
class PluginLoader(Loader):
"""Dummy loader for virtual namespace packages"""
# pylint: disable=unused-argument
def exec_module(self, module):
return None
class PluginFinder(MetaPathFinder):
"""
This class provides one or multiple namespace packages
it searches in 'sys.path' for the existing subdirectories
from which the modules can be imported
"""
def __init__(self, *packages):
self.packages = set()
self._zip_content_cache = {}
for name in packages:
self.packages.update(self.partition(name))
@staticmethod
def partition(name):
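# yield the cumulative dotted prefixes of a package name,
# e.g. "pkg.sub.mod" -> "pkg", "pkg.sub", "pkg.sub.mod"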
yield from accumulate(name.split("."), lambda a, b: ".".join((a, b)))
def zip_ns_dir(self, archive, parts):
cache = self._zip_content_cache.setdefault(archive, set())
path = Path(*parts)
if not cache:
with suppress(OSError):
with ZipFile(archive) as fd:
for name in fd.namelist():
cache.update(set(Path(name).parents))
return (Path(archive, path),) if path in cache else ()
def search_locations(self, fullname):
parts = fullname.split(".")
locations = []
for importer in sys.path_importer_cache.values():
if isinstance(importer, zipimporter):
locations.extend(
self.zip_ns_dir(Path(importer.archive).absolute(), parts)
)
elif hasattr(importer, "path"):
i_path = Path(importer.path)
path = i_path / Path(*parts[1:])
if i_path.name == parts[0] and path.is_dir():
locations.append(path)
elif hasattr(importer, "find_spec"):
spec = importer.find_spec(fullname)
if spec and spec.origin is None:
locations.extend(
Path(loc) for loc in spec.submodule_search_locations
)
return [str(path) for path in dict.fromkeys(locations)]
def find_spec(self, fullname, _path=None, _target=None):
if fullname not in self.packages:
return None
search_locations = self.search_locations(fullname)
if not search_locations:
return None
spec = ModuleSpec(fullname, PluginLoader(), is_package=True)
spec.submodule_search_locations = search_locations
return spec
def invalidate_caches(self):
self._zip_content_cache.clear()
def directories():
spec = find_spec(PACKAGE_NAME)
return spec.submodule_search_locations if spec else []
def iter_plugin_modules(fullname):
with suppress(ModuleNotFoundError):
pkg = importlib.import_module(fullname)
yield from iter_modules(path=pkg.__path__, prefix=f"{fullname}.")
def detected_collisions(from_dict, to_dict):
collisions = set(from_dict.keys()) & set(to_dict.keys())
return {
key: to_dict[key] for key in collisions if from_dict[key] is not to_dict[key]
}
# noinspection PyBroadException
def load_plugins(fullname, suffix, namespace=None):
classes = {}
namespace = namespace or {}
def gen_predicate(package_name):
def check_predicate(obj):
return (
isclass(obj)
and obj.__name__.endswith(suffix)
and obj.__module__.startswith(package_name)
)
return check_predicate
for finder, module_name, _is_pkg in iter_plugin_modules(fullname):
if re.match(r"^(\w+\.)*_", module_name):
continue
try:
try:
spec = finder.find_spec(module_name)
module = module_from_spec(spec)
spec.loader.exec_module(module)
except AttributeError:
# zipimporter instances have .find_spec() for python >= 3.10
module = finder.load_module(module_name)
except Exception: # pylint: disable=broad-except
print(f"Error while importing module '{module_name}'", file=sys.stderr)
traceback.print_exc(limit=-1)
continue
sys.modules[module_name] = module
if hasattr(module, "__all__"):
module_classes = {
name: obj
for name, obj in getmembers(module, gen_predicate(module_name))
if name in getattr(module, "__all__")
}
else:
module_classes = {
name: obj
for name, obj in getmembers(module, gen_predicate(module_name))
if not name.startswith("_")
}
GLOBALS.OVERRIDDEN.update(detected_collisions(module_classes, classes))
classes.update(module_classes)
GLOBALS.OVERRIDDEN.update(detected_collisions(classes, namespace))
namespace.update(classes)
return classes
def add_plugins():
# pylint: disable=import-outside-toplevel
from yt_dlp import extractor, postprocessor
GLOBALS.initialize()
all_classes = getattr(extractor, "_ALL_CLASSES", [])
extractor_map = extractor.__dict__
extractor_map.update(
{cls.__name__: cls for cls in all_classes if cls.__name__ not in extractor_map}
)
for key in GLOBALS.FOUND:
if key in GLOBALS.OVERRIDDEN:
extractor_map[key] = GLOBALS.OVERRIDDEN[key]
elif key in extractor_map:
del extractor_map[key]
GLOBALS.FOUND.clear()
GLOBALS.OVERRIDDEN.clear()
# only detect plugins, they are already loaded by yt-dlp
native_plugins = load_plugins("yt_dlp_plugins.extractor", "IE")
GLOBALS.FOUND.update(native_plugins)
ie_plugins = load_plugins(f"{PACKAGE_NAME}.extractor", "IE", extractor_map)
GLOBALS.FOUND.update(ie_plugins)
extractors = getattr(extractor, "extractors", None)
if extractors is not None:
extractors.__dict__.update(ie_plugins)
for cls in GLOBALS.OVERRIDDEN.values():
with suppress(ValueError):
all_classes.remove(cls)
all_classes[:0] = ie_plugins.values()
pp_plugins = load_plugins(
f"{PACKAGE_NAME}.postprocessor", "PP", postprocessor.__dict__
)
GLOBALS.FOUND.update(pp_plugins) | ytdlp-plugins | /ytdlp_plugins-2023.1.11-py3-none-any.whl/ytdlp_plugins/__init__.py | __init__.py |
import re
from typing import Any, Dict, Iterator, Optional, Sequence, Tuple
from urllib.parse import parse_qsl, quote_plus, urlparse, urlunparse
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
ExtractorError,
GeoRestrictedError,
OnDemandPagedList,
UnsupportedError,
get_element_by_id,
int_or_none,
parse_iso8601,
traverse_obj,
unescapeHTML,
)
__version__ = "2023.01.08"
AnyDict = Dict[str, Any]
class ServusTVIE(InfoExtractor):
IE_NAME = "servustv"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?servustv\.com/
(?:
videos | (?: [\w-]+/(?: v | [abkp]/[\w-]+ ) )
)
/(?P<id>[A-Za-z0-9-]+)
"""
PAGE_SIZE = 20
_GEO_COUNTRIES = ["AT", "DE", "CH", "LI", "LU", "IT"]
_GEO_BYPASS = False
_API_URL = "https://api-player.redbull.com/stv/servus-tv"
_LOGO = "https://presse.servustv.com/Content/76166/cfbc6a68-fd77-46d6-8149-7f84f76efe5c/"
_LIVE_URLS = {
"AT": "https://dms.redbull.tv/v4/destination/stv/stv-linear"
"/personal_computer/chrome/at/de_AT/playlist.m3u8",
"DE": "https://dms.redbull.tv/v4/destination/stv/stv-linear"
"/personal_computer/chrome/de/de_DE/playlist.m3u8",
}
_TESTS = [
{
# new URL schema
"url": "https://www.servustv.com/wissen/v/aa-273cebhp12111/",
"info_dict": {
"id": "aa-273cebhp12111",
"ext": "mp4",
"title": "Was lebt im Steinbruch?",
"series": "P.M. Wissen",
"season_number": 1,
"episode_number": 113,
"description": "md5:a905b6135469cf60a07d4d0ae1e8d49a",
"duration": 271,
"timestamp": int,
"categories": ["P.M. Wissen"],
"age_limit": 0,
"upload_date": "20211111",
"is_live": False,
"thumbnail": r"re:^https?://.*\.jpg",
},
"params": {
"skip_download": True,
"format": "bestvideo",
"geo_bypass_country": "DE",
},
},
{
# old URL schema
"url": "https://www.servustv.com/videos/aa-273cebhp12111/",
"info_dict": {
"id": "aa-273cebhp12111",
"ext": "mp4",
"title": "Was lebt im Steinbruch?",
"description": "md5:a905b6135469cf60a07d4d0ae1e8d49a",
"duration": 271,
"timestamp": int,
"categories": ["P.M. Wissen"],
"age_limit": 0,
"upload_date": "20211111",
"is_live": False,
"thumbnail": r"re:^https?://.*\.jpg",
},
"params": {
"skip_download": True,
"format": "bestvideo",
"geo_bypass_country": "DE",
},
},
{
# playlist
"url": "https://www.servustv.com/volkskultur/b/ich-bauer/aa-1qcy94h3s1w11/",
"info_dict": {
"id": "aa-1qcy94h3s1w11",
"title": "startswith:Ich, Bauer",
"description": "md5:04cd98226e5c07ca50d0dc90f4a27ea1",
},
"playlist": [
{
"info_dict": {
"id": "aa-22rankb9h2112",
"title": "Der Engelswand-Bauer",
"series": "Ich, Bauer",
"season_number": 2,
"episode_number": 3,
"description": "md5:22149f1593cac13703dc31f87162badb",
"timestamp": int,
"upload_date": "20210501",
},
},
{
"info_dict": {
"id": "aa-24hxt6ycw1w12",
"title": "Der Generationenhof",
"series": "Ich, Bauer",
"season_number": 3,
"episode_number": 1,
"description": "md5:01335fd4f02d66d6ae9af2c5387d18a3",
"timestamp": int,
"upload_date": "20210501",
},
},
],
"params": {
"geo_bypass_country": "AT",
"format": "bestvideo",
"skip_download": True,
"playlist_items": ":4",
},
},
{
# block post playlist
"url": "https://www.servustv.com/aktuelles/a/"
"corona-auf-der-suche-nach-der-wahrheit-teil-3-die-themen/193214/",
"info_dict": {
"id": "corona-auf-der-suche-nach-der-wahrheit-teil-3-die-themen",
"title": "Corona – auf der Suche nach der Wahrheit, Teil 3: Die Themen",
"description": "md5:a8a9c163eaf76f5ead9efac244e54935",
},
"playlist": [
{
"info_dict": {
"id": "aa-28zh3u3dn2111",
"title": "Corona-Doku: Teil 3",
"description": "md5:5e020c2618a6d6d2b8a316891c8b8195",
"timestamp": int,
"upload_date": "20211222",
},
},
{
"info_dict": {
"id": "aa-27juub3a91w11",
"title": "Teil 1: Corona – auf der Suche nach der Wahrheit",
"description": "md5:b8de3e9d911bb2cdc0422cf720d795b5",
"timestamp": int,
"upload_date": "20210505",
},
},
{
"info_dict": {
"id": "aa-28a3dbyxh1w11",
"title": "Teil 2: Corona – auf der Suche nach der Wahrheit",
"description": "md5:9904e42bb1b99c731e651ed2276a87e6",
"timestamp": int,
"upload_date": "20210801",
},
},
],
"params": {
"geo_bypass_country": "DE",
"format": "bestvideo",
"skip_download": True,
},
},
{
# main live stream
"url": "https://www.servustv.com/allgemein/p/jetzt-live/119753/",
"info_dict": {
"id": str,
"ext": "mp4",
"title": str,
"description": str,
"duration": None,
"timestamp": (type(None), int),
"upload_date": (type(None), str),
"is_live": True,
"age_limit": (type(None), int),
"thumbnail": (type(None), str),
},
"params": {
"skip_download": True,
"outtmpl": "livestream.%(ext)s",
"format": "bestvideo/best",
},
},
{
# topic live stream
"url": "https://www.servustv.com/natur/k/natur-kanal/269299/",
"info_dict": {
"id": str,
"ext": "re:m3u8|m4a",
"title": str,
"description": str,
"duration": None,
"timestamp": (type(None), int),
"upload_date": (type(None), str),
"is_live": True,
"age_limit": (type(None), int),
"thumbnail": (type(None), str),
"format_id": r"re:audio-(en|de)$",
},
"params": {
"skip_download": True,
"outtmpl": "livestream.%(ext)s",
"format": "bestaudio",
},
},
{
# block page playlist
"url": "https://www.servustv.com/sport/p/motorsport/325/",
"info_dict": {
"id": "motorsport",
"title": "Motorsport",
"description": "md5:cc8e904daecaa697fcf03af3edb3c743",
},
"playlist_mincount": 2,
"params": {
"geo_bypass_country": "DE",
"format": "bestvideo",
"skip_download": True,
},
},
{
"url": "https://www.servustv.com/allgemein/v/aagevnv3syv5kuu8cpfq/",
"only_matching": True,
},
]
JSON_OBJ_ID = "__NEXT_DATA__"
def __init__(self, downloader=None):
super().__init__(downloader=downloader)
self.country_override = None
self.timezone = "Europe/Vienna"
@property
def country_code(self) -> str:
return self.country_override or self._GEO_COUNTRIES[0]
def initialize(self):
geo_bypass_country = self.get_param("geo_bypass_country")
if geo_bypass_country:
self.country_override = geo_bypass_country.upper()
self.to_screen(f"Set countrycode to {self.country_code!r}")
super().initialize()
def _og_search_title(self, html: str, **kwargs) -> str:
site_name = self._og_search_property("site_name", html, default=None)
title = super()._og_search_title(html, **kwargs)
if site_name and title:
title = title.replace(f" - {site_name}", "", 1)
return title
def _playlist_meta(self, page_data: AnyDict, webpage: str) -> AnyDict:
return {
"playlist_id": page_data.get("slug"),
"playlist_title": traverse_obj(page_data, ("title", "rendered"))
or self._og_search_title(webpage, default=None),
"playlist_description": traverse_obj(
page_data, "stv_short_description", "stv_teaser_description"
)
or self._og_search_description(webpage, default=None),
}
def _auto_merge_formats(self, formats: Sequence[AnyDict]):
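# If the requested format is video-only and exactly one audio-only stream
# exists, extend the request to "video+audio" and rebuild the downloader's
# format selector so the two streams get merged.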
requested_format = self.get_param("format")
audio_formats = [fmt for fmt in formats if fmt.get("vcodec") == "none"]
audio_only = [fmt["format_id"] for fmt in audio_formats]
video_only = {
fmt["format_id"] for fmt in formats if fmt.get("acodec") == "none"
}
for fmt in audio_formats:
if fmt["ext"] == "m3u8":
fmt["ext"] = "m4a"
if self._downloader and len(audio_only) == 1 and requested_format in video_only:
requested_format = f"{requested_format}+{audio_only[0]}"
self.to_screen(
f"Adding audio stream {audio_only[0]!r} to video only format"
)
self._downloader.format_selector = self._downloader.build_format_selector(
requested_format
)
def _hls_duration(self, formats: Sequence[AnyDict]) -> Optional[float]:
for fmt in formats:
if not fmt["url"].endswith(".m3u8"):
return None
m3u8_doc = self._download_webpage(
fmt["url"],
None,
note="Probing HLS stream duration",
fatal=False,
)
matches = re.findall(
r"(?m)^#EXT(?:INF:(\d*\.?\d+),|-X-ENDLIST)", m3u8_doc or ""
)
if matches and matches[-1] == "":
return sum(map(float, matches[:-1]))
break
return None
def _download_formats(self, video_url: str, video_id: str):
if not video_url:
return [], {}
try:
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
video_url,
video_id=None,
errnote="Stream not available",
)
except ExtractorError as exc:
raise ExtractorError(exc.msg, video_id=video_id, expected=True) from exc
for fmt in formats:
if "height" in fmt:
fmt["format_id"] = f"{fmt['height']}p"
if fmt.get("vcodec") == "none" and fmt.get("language"):
fmt["format_id"] = f"audio-{fmt['language']}"
return formats, subtitles
@staticmethod
def program_info(info: AnyDict) -> AnyDict:
program_info = {"series": info.get("label"), "chapter": info.get("chapter")}
match = re.match(r"\D+(\d+)", info.get("season", ""))
if match:
program_info["season_number"] = int(match[1])
match = re.match(r"Episode\s+(\d+)(?:\s+-(.*))?", info.get("chapter", ""))
if match:
program_info["episode_number"] = int(match[1])
program_info["chapter"] = match[2] and match[2].strip()
return program_info
def _entry_by_id(self, video_id: str, video_url=None, is_live=False) -> AnyDict:
info = self._download_json(
self._API_URL,
query={"videoId": video_id.upper(), "timeZone": self.timezone},
video_id=video_id,
fatal=False,
expected_status=(400, 404, 500),
) or {"error": "Server Error", "message": "Bad JSON Response"}
if "error" in info:
raise ExtractorError(
": ".join((info["error"], info["message"])), expected=True
)
if video_url is None:
video_url = info.get("videoUrl")
live_status = "is_live" if is_live else "not_live"
errors = ", ".join(info.get("playabilityErrors", ()))
if errors and not video_url:
errormsg = f'{info.get("title", "Unknown")} - {errors}'
if "NOT_YET_AVAILABLE" in errors:
live_status = "is_upcoming"
if "GEO_BLOCKED" in errors:
countries = None
blocked_countries = info.get("blockedCountries")
if blocked_countries:
countries = set(self._GEO_COUNTRIES) - set(blocked_countries)
raise GeoRestrictedError(errormsg, countries=countries)
self.raise_no_formats(errormsg, expected=True)
formats, subtitles = self._download_formats(video_url, video_id)
self._auto_merge_formats(formats)
program_info = self.program_info(info)
duration = info.get("duration")
if is_live:
duration = None
elif not duration and live_status == "not_live":
duration = self._hls_duration(formats)
live_status = "was_live" if duration else "is_live"
return {
"id": video_id,
"title": info.get("title", "").strip() or program_info.get("chapter"),
**program_info,
"description": info.get("description"),
"thumbnail": info.get("poster", self._LOGO),
"duration": duration,
"timestamp": parse_iso8601(info.get("currentSunrise")),
"release_timestamp": parse_iso8601(
traverse_obj(
info,
("playabilityErrorDetails", "NOT_YET_AVAILABLE", "availableFrom"),
default=None,
)
),
"live_status": live_status,
"categories": [info["label"]] if info.get("label") else [],
"age_limit": int(
self._search_regex(
r"(?:^|\s)(\d\d?)(?:\s|$)",
info.get("maturityRating", "0"),
"age_limit",
default="0",
)
),
"formats": formats,
"subtitles": subtitles,
}
def _url_entry_from_post(self, post: AnyDict, **kwargs) -> AnyDict:
duration = int_or_none(traverse_obj(post, ("stv_duration", "raw")))
return self.url_result(
post["link"],
video_id=post.get("slug"),
video_title=unescapeHTML(
traverse_obj(
post,
("title", "rendered"),
"stv_short_title",
"stv_teaser_title",
)
),
description=traverse_obj(post, "stv_teaser_description"),
duration=duration and duration * 0.001,
**kwargs,
)
def _live_stream_from_schedule(
self, schedule: Sequence[AnyDict], stream_id: Optional[str]
) -> AnyDict:
if self.country_code in self._LIVE_URLS:
video_url = self._LIVE_URLS[self.country_code]
else:
video_url = self._LIVE_URLS["DE"].replace(
"/de_DE/", f"/de_{self.country_code}/"
)
if not stream_id or stream_id.startswith("stvlive"):
pass
elif stream_id in {"nature", "science", "sports", "wintersport"}:
video_url = video_url.replace("/stv-linear/", f"/{stream_id}/")
else:
raise ExtractorError(f"Unsupported live stream {stream_id!r}")
for item in sorted(
schedule, key=lambda x: x.get("is_live", False), reverse=True
):
if item.get("is_live", False) is False:
self.report_warning("Livestream might not be available")
return self._entry_by_id(
item["aa_id"].lower(), video_url=video_url, is_live=True
)
assert False, "Should not happen"
def _paged_playlist_by_query(self, url: str, qid: str):
url_parts = urlparse(url)
url_query = dict(parse_qsl(url_parts.query))
# pylint: disable=protected-access
# noinspection PyProtectedMember
query_api_url = urlunparse(url_parts._replace(query="", fragment=""))
json_query = {
**url_query,
"geo_override": self.country_code,
"post_type": "media_asset",
# "filter_playability": "true",
"per_page": self.PAGE_SIZE,
}
def fetch_page(page_number: int) -> Iterator[AnyDict]:
json_query.update({"page": page_number + 1})
info = self._download_json(
query_api_url,
query=json_query,
video_id=qid,
note=f"Downloading entries "
f"{page_number * self.PAGE_SIZE + 1}-{(page_number + 1) * self.PAGE_SIZE}",
)
for post in info["posts"]:
yield self._url_entry_from_post(post)
return OnDemandPagedList(fetch_page, self.PAGE_SIZE)
def _entries_from_blocks(self, blocks: Sequence[AnyDict]) -> Iterator[AnyDict]:
"""return url results or multiple playlists"""
categories: Dict[str, AnyDict] = {}
def flatten(_blocks: Sequence[AnyDict], depth=0):
for _block in _blocks:
post = _block.get("post", {})
if "/v/" in post.get("link", ""):
category = post.get("stv_category_name")
entries = categories.setdefault(str(category), {})
entry = self._url_entry_from_post(
post, url_transparent=True, _block=category
)
entries[entry["id"]] = entry
flatten(_block.get("innerBlocks", ()), depth=depth + 1)
flatten(blocks)
if len(categories) == 1:
yield from categories.popitem()[1].values()
else:
for name, entry_map in categories.items():
info = self.playlist_result(
list(entry_map.values()),
playlist_id=name.lower().replace(" ", "_"),
playlist_title=name,
extractor=self.IE_NAME,
extractor_key=self.ie_key(),
)
yield info
@staticmethod
def _page_data(json_obj: AnyDict) -> AnyDict:
for item in ("data", "post", "page"):
page_data = traverse_obj(
json_obj, f"props/pageProps/{item}".split("/"), default={}
)
if page_data:
break
return page_data
def _filter_query(self, json_obj: AnyDict, *names: str) -> Tuple[str, AnyDict]:
data = traverse_obj(
json_obj,
"props/pageProps/initialLibData".split("/"),
"props/pageProps/data".split("/"),
default={},
)
for filter_info in data.get("filters", ()):
name = filter_info.get("value", "none")
if name in names:
return name, filter_info
return "none", {}
def _real_extract(self, url: str) -> AnyDict:
video_id = self._match_id(url)
url_parts = urlparse(url)
url_query = {key.lower(): value for key, value in parse_qsl(url_parts.query)}
# server accepts tz database names
# see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
if "timezone" in url_query:
self.timezone = url_query["timezone"]
self.to_screen(f"Set timezone to {self.timezone!r}")
# single video
if "/v/" in url_parts.path or url_parts.path.startswith("/videos/"):
return self._entry_by_id(video_id)
webpage = self._download_webpage(url, video_id=video_id)
try:
json_obj = self._parse_json(
get_element_by_id(self.JSON_OBJ_ID, webpage), video_id
)
except TypeError as exc:
raise ExtractorError("Cannot extract metadata.") from exc
if self.country_override is None:
self.country_override = traverse_obj(
json_obj, "props/pageProps/geo".split("/"), default=None
)
page_data = self._page_data(json_obj)
# find livestreams
live_schedule = page_data.get("stv_live_player_schedule")
if live_schedule:
return self._live_stream_from_schedule(
live_schedule, page_data.get("stv_linear_stream_id")
)
# create playlist from query
qid, filter_info = self._filter_query(json_obj, "all-videos", "upcoming")
if filter_info:
return self.playlist_result(
self._paged_playlist_by_query(filter_info["url"], qid=qid),
**self._playlist_meta(page_data, webpage),
playlist_count=filter_info.get("count", "N/A"),
)
# create playlist from block data
embedded_video = page_data.get("stv_embedded_video")
entries = [self._url_entry_from_post(embedded_video)] if embedded_video else []
entries.extend(self._entries_from_blocks(page_data.get("blocks", ())))
if not entries:
raise UnsupportedError(url)
return self.playlist_result(
entries,
**self._playlist_meta(page_data, webpage),
)
class ServusSearchIE(ServusTVIE):
IE_NAME = "servustv:search"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?servustv.com
/search
/(?P<id>[^/?#]+)
(?:/all-videos/\d+)?/?$
"""
_TESTS = [
{
# search playlist
"url": "https://www.servustv.com/search/hubert+staller/",
"info_dict": {
"id": "search_hubert+staller",
"title": "search: 'hubert staller'",
"description": None,
},
"params": {"skip_download": True, "geo_bypass": False},
"playlist_mincount": 1,
"playlist_maxcount": 10,
}
]
def _playlist_meta(self, page_data, webpage):
search_term = page_data.get("searchTerm", "[searchTerm]")
return {
"playlist_id": f"search_{quote_plus(search_term)}",
"playlist_title": f"search: {search_term!r}",
}
class PmWissenIE(ServusTVIE):
IE_NAME = "pm-wissen"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?(?:pm-wissen)\.com/
(?:
videos | (?: [\w-]+/(?: v | [p]/[\w-]+ ) )
)
/(?P<id>[A-Za-z0-9-]+)
"""
_TESTS = [
{
# test embedded links from 3rd party sites
"url": "https://www.pm-wissen.com/umwelt/v/aa-24mus4g2w2112/",
"info_dict": {
"id": "aa-24mus4g2w2112",
"ext": "mp4",
"title": "Meer ohne Plastik?",
"description": str,
"duration": 418,
"timestamp": int,
"upload_date": str,
"is_live": False,
"thumbnail": r"re:^https?://.*\.jpg",
},
"params": {"skip_download": True, "format": "bestvideo"},
},
{
# topic playlist
"url": "https://www.pm-wissen.com/mediathek/p/redewendungen-mediathek/11908/",
"info_dict": {
"id": "redewendungen-mediathek",
"title": "Redewendungen Mediathek",
"description": "Alle Videos zum Thema Redewendungen",
},
"playlist_mincount": 20,
"params": {"skip_download": True},
},
{
# playlist from blocks (fails on older yt-dlp versions)
"url": "https://www.pm-wissen.com/mediathek/p/highlights-mediathek/11900/",
"info_dict": {
"id": "highlights-mediathek",
"title": "Mediathek",
"description": "md5:2260ac68a6ee376912beb4c73e3d5b33",
},
"playlist_mincount": 12,
"params": {"skip_download": True},
},
]
JSON_OBJ_ID = "__FRONTITY_CONNECT_STATE__"
@staticmethod
def _page_data(json_obj):
for item in ("page", "data"):
page_data = traverse_obj(json_obj, f"source/{item}".split("/"), default={})
if page_data:
page_data = next(iter(page_data.values()))
break
return page_data
def _filter_query(self, json_obj, *names: str) -> Tuple[str, Dict]:
link = traverse_obj(json_obj, ("router", "link"), default="")
data = traverse_obj(
json_obj,
("source", "data", link),
default={},
)
for filter_info in data.get("filters", ()):
name = filter_info.get("value", "none")
if name in names:
return name, filter_info
page_data = self._page_data(json_obj)
category = page_data.get("categories", ())
if category:
return category[0], {
"url": "https://backend.pm-wissen.com/wp-json/rbmh/v2/query-filters/query/?"
f"categories={category[0]}&f[primary_type_group]=all-videos&filter_bundles=true&"
"filter_non_visible_types=true&geo_override=DE&orderby=rbmh_playability&"
"page=3&per_page=12&post_type=media_asset&query_filters=primary_type_group"
}
return "none", {}
class PmWissenSearchIE(PmWissenIE):
IE_NAME = "pm-wissen:search"
_VALID_URL = r"""(?x)
https?://
(?:www\.)?pm-wissen.com
/search
/(?P<id>[^/?#]+)
(?:/all-videos/\d+)?/?$
"""
_TESTS = [
{
# search playlist
"url": "https://www.pm-wissen.com/search/weltall/",
"info_dict": {
"id": "search_weltall",
"title": "search: 'weltall'",
},
"params": {"skip_download": True, "geo_bypass": False},
"playlist_mincount": 21,
}
]
def _playlist_meta(self, page_data, webpage):
search_query = page_data.get("searchQuery", "[searchQuery]")
return {
"playlist_id": f"search_{quote_plus(search_query)}",
"playlist_title": f"search: {search_query!r}",
} | ytdlp-servustv | /ytdlp_servustv-2023.1.8-py3-none-any.whl/yt_dlp_plugins/extractor/servustv.py | servustv.py |
import re
from contextlib import suppress
from operator import itemgetter
from urllib.parse import urlparse
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import (
ExtractorError,
OnDemandPagedList,
UnsupportedError,
parse_iso8601,
sanitize_url,
traverse_obj,
)
__version__ = "2023.01.10"
# pylint: disable=protected-access
with suppress(AttributeError):
# dirty hack to disable EpochIE
from yt_dlp.extractor import epoch, lazy_extractors
lazy_extractors.EpochIE._ENABLED = False
epoch.EpochIE._ENABLED = False
# pylint: disable=abstract-method
class YoumakerIE(InfoExtractor):
_VALID_URL = r"""(?x)
https?://(?:[a-z][a-z0-9]+\.)?youmaker\.com/
(?:v|c|video|embed|channel|playlist)/
(?P<id>[0-9a-zA-Z-]+)
"""
_TESTS = [
{
# single video with playlist subtitles
"url": "https://www.youmaker.com/video/8edd428d-74be-4eb0-b3fd-7b277e508adb",
"info_dict": {
"id": "8edd428d-74be-4eb0-b3fd-7b277e508adb",
"ext": "mp4",
"title": "x22 Report Ep. 2597b - Trump Never Conceded, Space Force Going...",
"description": r"re:(?s)^https://t\.me/realx22report\.+",
"thumbnail": r"re:^https?://.*\.(?:jpg|png)$",
"duration": 2697,
"upload_date": "20211011",
"uploader": "user_d94db024048d1d562eaa479eeedfc0bf6a8a8a3b",
"timestamp": 1633915895,
"channel": "Channel 17",
"channel_id": "e92d56c8-249f-4f61-b7d0-75c4e05ecb4f",
"channel_url": r"re:https?://(:?[a-z][a-z0-9]+\.)?youmaker.com/channel/"
r"e92d56c8-249f-4f61-b7d0-75c4e05ecb4f",
"tags": ["qanon", "trump", "usa", "maga"],
"categories": ["News"],
"live_status": "not_live",
"subtitles": {
"en": [
{
"url": r"re:https?://[a-z1-3]+.youmaker.com/assets/2021/1011/"
r"8edd428d-74be-4eb0-b3fd-7b277e508adb/subtitles_en.m3u8"
}
]
},
},
"params": {"skip_download": True, "listsubtitles": True},
"expected_warnings": [r"Missing m3u8 info\. Trying alternative server"],
},
{
# test video with JSON requested subtitles
"url": "https://www.youmaker.com/video/b58f88fe-4ddb-4c11-bccf-46f579b7d978",
"info_dict": {
"id": "b58f88fe-4ddb-4c11-bccf-46f579b7d978",
"ext": "mp4",
"title": "Snow cone vendor in Antigua, Guatemala",
"description": r're:(?s)^We call it "Gola" here in India\.\.\..*',
"upload_date": "20211001",
"uploader": "user_71885a31e113614751e14bba45d3bdcfd10d3f08",
"timestamp": 1633055950,
"live_status": "not_live",
"subtitles": {
"en": [
{
"url": r"re:https?://[a-z1-3]+.youmaker.com/assets/2021/1001/"
r"b58f88fe-4ddb-4c11-bccf-46f579b7d978/"
r"subtitle_1633055993844\.auto\.en\.vtt"
}
]
},
},
"params": {"skip_download": True, "listsubtitles": True},
"expected_warnings": [r"Missing m3u8 info\. Trying alternative server"],
},
{
# all videos from channel
"url": "https://youmaker.com/channel/f06b2e8d-219e-4069-9003-df343ac5fcf3",
"playlist_mincount": 30,
"info_dict": {
"id": "f06b2e8d-219e-4069-9003-df343ac5fcf3",
"title": "YoYo Cello",
"description": "Connect the World Through Music. \nConnect Our Hearts with Music.",
},
},
{
# all videos from channel (new scheme)
"url": "https://youmaker.com/c/QDRVZ1RAm2DY_Horror-Sci-Fi-Classics.html",
"playlist_mincount": 10,
"info_dict": {
"id": "694dd4c5-edcc-4718-9d1e-d907b0994fa7",
"title": "Classics +",
"description": "Classics + is a channel to enjoy classics films and series from "
"the past that you might have missed and now could be interested in watching or "
"just simply want to see again.\n\n",
},
},
{
# all videos from channel playlist
"url": "https://www.youmaker.com/channel/f8d585f8-2ff7-4c3c-b1ea-a78d77640d54/"
"playlists/f99a120c-7a5e-47b2-9235-3817d1c12662",
"playlist_mincount": 9,
"info_dict": {
"id": "f99a120c-7a5e-47b2-9235-3817d1c12662",
"title": "Mini Cakes",
},
},
{
# test embedded videos from another site
"url": "https://www.epochtimes.de/feuilleton/buecher/"
"corona-impfung-was-aerzte-und-patienten-unbedingt-wissen-sollten-a3619532.html",
"md5": "fd1f0a675332c58d18202e45e89a2d3a",
"info_dict": {
"id": "203108a4-b4c9-4a65-ac2e-dceac7e4e462",
"ext": "mp4",
"title": "contains:Corona-Impfung",
"description": "contains:Epoch Times",
"uploader": str,
"upload_date": str,
"timestamp": int,
"live_status": "not_live",
},
"params": {"skip_download": True},
},
{
# test embedded videos from another site
"url": "https://epochtimes.pl/metoda-kpch-ogolnoswiatowa-agenda-komunistycznej-partii-chin-film/",
"playlist_mincount": 1,
"info_dict": {
"id": "metoda-kpch-ogolnoswiatowa-agenda-komunistycznej-partii-chin-film",
"title": "startswith:Metoda KPCh",
"description": str,
"timestamp": (float, int, None),
"upload_date": (str, None),
},
"playlist": [
{
"md5": "4ad0f3bdc64a393e8907967636f9f439",
"info_dict": {
"id": "1c99bd32-6092-4bc5-5878-cc5fd6724d04",
"ext": "mp4",
"title": "Metoda KPCh",
"description": "startswith:Czy mo\u017cemy cierpie\u0107 bardziej",
"uploader": str,
"upload_date": str,
"timestamp": int,
"live_status": "not_live",
},
}
],
"params": {"skip_download": True},
},
{
# test embedded videos from another site
"url": "https://www.theepochtimes.com/dick-morris-discusses-his-book-the-return-trumps-big-2024-comeback_4819205.html",
"info_dict": {
"id": "9489f994-2a20-4812-b233-ac0e5c345632",
"ext": "mp4",
"title": "LIVE: Dick Morris Discusses His Book 'The Return: Trump’s Big 2024 Comeback'",
"description": str,
"uploader": str,
"upload_date": "20221025",
"timestamp": 1666738800,
"duration": 4257,
"live_status": "was_live",
},
"params": {"skip_download": True},
},
{"url": "https://www.youmaker.com/embed/Dnnrq0lw8062/", "only_matching": True},
{"url": "https://vs.youmaker.com/v/Dnnrq0lw8062/", "only_matching": True},
{"url": "https://youmaker.com/playlist/v6aLJnrqkoXO/", "only_matching": True},
{"url": "http://youmaker.com/channel/ntd/", "only_matching": True},
{
"url": "https://youmaker.com/c/Vvle0k05VQpm_Musical-Moments-East.html",
"only_matching": True,
},
]
REQUEST_LIMIT = 50
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
super().__init__(downloader=downloader)
self._category_map = None
self._cache = {}
@classmethod
def _extract_embed_urls(cls, url, webpage):
uids = re.findall(
r"""(?x)
<(?:iframe|script|video)[^>]+src="
(?:https?:)?//(?:[a-z][a-z0-9]+\.)?
youmaker\.com/(?:embed/|assets/|player/)+(?P<uid>[0-9a-zA-Z-]+)
[^"]*"
""",
webpage,
)
return (f"https://youmaker.com/v/{uid}" for uid in uids)
@property
def _base_url(self):
return sanitize_url("//www.youmaker.com", scheme="https")
@property
def _asset_url(self):
# as this url might change in the future
# it needs to be extracted from some js magic...
return sanitize_url("//vs.youmaker.com/assets", scheme="https")
@staticmethod
def _live_url(video_id, endpoint="playlist.m3u8"):
return sanitize_url(
f"//live2.youmaker.com/{video_id}/{endpoint}", scheme="https"
)
@staticmethod
def _try_server_urls(url):
"""as some playlist urls are invalid
we can generate possible candidates to try
"""
if not url:
return []
match_replace = (
("//vs.youmaker.com/", "//vs1.youmaker.com/"),
("//vs1.youmaker.com/", "//vs.youmaker.com/"),
)
candidates = [url]
for match, replace in match_replace:
other_url = url.replace(match, replace)
if url != other_url:
candidates.append(other_url)
return candidates
def _call_api(self, uid, path, what="JSON metadata", fatal=True, **kwargs):
"""
call the YouMaker JSON API and return a valid data object
path: API endpoint
what: query description
fatal: if True might raise ExtractorError otherwise warn and return None
**kwargs: parameters passed to _download_json()
"""
url = f"{self._base_url}/v1/api/{path}"
kwargs.setdefault("note", f"Downloading {what}")
kwargs.setdefault("errnote", f"Failed to download {what}")
info = self._download_json(url, uid, fatal=fatal, **kwargs)
# soft error already reported
if info is False:
return None
status = traverse_obj(info, "status", str)
data = traverse_obj(info, "data", (list, dict))
if status != "ok":
msg = f'{what} - {status or "Bad JSON response"}'
if fatal or status is None:
raise ExtractorError(
msg, video_id=None, expected=isinstance(status, str)
)
self.report_warning(msg, video_id=uid)
return data
@property
def _categories(self):
if self._category_map is None:
category_list = self.cache.load("youmaker", "categorylist")
if category_list is None:
category_list = (
self._call_api(
None, "video/category/list", what="categories", fatal=False
)
or ()
)
self.cache.store("youmaker", "categorylist", category_list)
self._category_map = {item["category_id"]: item for item in category_list}
return self._category_map
def _categories_by_id(self, cid):
categories = []
if cid is None:
return categories
while True:
item = self._categories.get(cid)
if item is None or item["category_name"] in categories:
break
categories.insert(0, item["category_name"])
cid = item["parent_category_id"]
return categories
# pylint: disable=arguments-differ
def _get_subtitles(self, system_id):
if system_id is None:
return {}
subs_list = (
self._call_api(
system_id,
"video/subtitle",
what="subtitle info",
query={"systemid": system_id},
fatal=False,
)
or ()
)
subtitles = {}
for item in subs_list:
url = item.get("url")
if not url:
continue
lang = item.get("language_code", "xx")
subtitles.setdefault(lang, []).append({"url": f"{self._asset_url}/{url}"})
return subtitles
def handle_formats(self, playlist_url, video_uid):
formats = []
playlist_subtitles = {}
for count, candidate_url in enumerate(self._try_server_urls(playlist_url)):
if count > 0:
self.report_warning(
f"Missing m3u8 info. Trying alternative server ({count})",
video_id=video_uid,
)
formats, playlist_subtitles = self._extract_m3u8_formats_and_subtitles(
sanitize_url(candidate_url, scheme="https"),
video_uid,
ext="mp4",
errnote=False,
fatal=False,
)
if formats:
break
# sometimes there are duplicate entries, so we filter them out
format_mapping = {item["url"]: item for item in formats}
formats = list(format_mapping.values())
for item in formats:
height = traverse_obj(item, "height", int)
if height:
item["format_id"] = f"{height}p"
return formats, playlist_subtitles
def _video_entry_by_metadata(self, info):
try:
video_uid = info["video_uid"]
except KeyError as exc:
raise ExtractorError(f"{exc!s} not found in video metadata") from exc
release_timestamp = None
live_status = "was_live" if info.get("live") else "not_live"
video_info = info.get("data", {})
playlist_url = traverse_obj(
video_info, ("videoAssets", "Stream"), expected_type=str
)
if info.get("live") and playlist_url is None:
live_info = (
self._download_json(
self._live_url(video_uid, "status"),
video_id=video_uid,
note="Checking live status",
errnote="Live status not available",
fatal=False,
)
or {}
)
live_status = (
"post_live"
if traverse_obj(live_info, ("data", "status")) == "end"
else "is_live"
)
release_timestamp = parse_iso8601(
traverse_obj(live_info, ("data", "start_time"))
) or parse_iso8601(info.get("scheduled_time"))
storage_path = traverse_obj(live_info, ("data", "storage_path"))
if live_status == "post_live" and storage_path:
live_status = "was_live"
playlist_url = (
f"{self._asset_url}/{storage_path}/{video_uid}/playlist.m3u8"
)
else:
playlist_url = self._live_url(video_uid)
formats, playlist_subtitles = self.handle_formats(playlist_url, video_uid)
if live_status != "not_live" and not formats:
if live_status == "is_live":
live_status = "is_upcoming"
errmsg = (
"This live event has ended."
if live_status in {"was_live", "post_live"}
else "This live event has not started yet."
)
self.raise_no_formats(errmsg, expected=True, video_id=video_uid)
if live_status in {"is_live", "is_upcomng", "post_live"}:
live_count_info = self._call_api(
video_uid,
"live/count",
what="live count",
fatal=False,
query={"id": video_uid},
)
else:
live_count_info = None
return {
"id": video_uid,
"title": info["title"],
"description": info.get("description"),
"formats": formats,
"live_status": live_status,
"timestamp": parse_iso8601(info.get("uploaded_at")),
"release_timestamp": release_timestamp,
"uploader": info.get("uploaded_by"),
"duration": video_info.get("duration"),
"categories": self._categories_by_id(info.get("category_id")),
"tags": [tag.strip() for tag in info.get("tag", "").strip("[]").split(",")],
"channel": info.get("channel_name"),
"channel_id": info.get("channel_uid"),
"channel_url": (
f'{self._base_url}/channel/{info["channel_uid"]}'
if "channel_uid" in info
else None
),
"thumbnail": info.get("thumbmail_path"),
"view_count": info.get("click"),
"concurrent_view_count": traverse_obj(live_count_info, "liveCount"),
"subtitles": playlist_subtitles
or self.extract_subtitles(info.get("system_id")),
}
def _video_entry_by_id(self, uid):
info = self._cache.get(uid) or self._call_api(
uid, f"video/metadata/{uid}", what="video metadata"
)
return self._video_entry_by_metadata(info)
def _paged_playlist_entries(self, uid, page_size=REQUEST_LIMIT):
def fetch_page(page_number):
offset = page_number * page_size
info = self._call_api(
uid,
path="playlist/video",
what=f"playlist entries {offset + 1}-{offset + page_size}",
query={"playlist_uid": uid, "offset": offset, "limit": page_size},
)
if not isinstance(info, list):
raise ExtractorError("Unexpected playlist entries", uid, expected=False)
for item in info:
video_uid, title = itemgetter("video_uid", "video_title")(item)
yield self.url_result(
f"{self._base_url}/video/{video_uid}",
ie=self.ie_key(),
video_id=video_uid,
video_title=title,
)
return OnDemandPagedList(fetch_page, page_size)
def _paged_channel_entries(self, uid, page_size=REQUEST_LIMIT):
def fetch_page(page_number):
offset = page_number * page_size
info = self._call_api(
uid,
path=f"video/channel/{uid}",
what=f"channel entries {offset + 1}-{offset + page_size}",
query={"offset": offset, "limit": page_size},
)
if not isinstance(info, list):
raise ExtractorError("Unexpected channel entries", uid, expected=False)
for item in info:
video_uid, title = itemgetter("video_uid", "title")(item)
self._cache[video_uid] = item
yield self.url_result(
f"{self._base_url}/video/{video_uid}",
ie=self.ie_key(),
video_id=video_uid,
video_title=title,
)
return OnDemandPagedList(fetch_page, page_size)
def _playlist_entries_by_id(self, uid):
info = self._call_api(uid, f"playlist/{uid}", what="playlist metadata")
return self.playlist_result(
self._paged_playlist_entries(info["playlist_uid"]),
playlist_id=info["playlist_uid"],
playlist_title=info.get("name"),
playlist_description=None,
)
def _channel_entries_by_id(self, uid):
info = self._call_api(
uid, path=f"video/channel/metadata/{uid}", what="channel metadata"
)
return self.playlist_result(
self._paged_channel_entries(info["channel_uid"]),
playlist_id=info["channel_uid"],
playlist_title=info.get("name"),
playlist_description=info.get("description"),
)
def _real_extract(self, url):
dispatch = (
(r"/(?:v|video|embed)/(?P<uid>[a-zA-z0-9-]+)", self._video_entry_by_id),
(
r"(/channel/[a-zA-z0-9-]+)?/playlists?/(?P<uid>[a-zA-z0-9-]+)",
self._playlist_entries_by_id,
),
(
r"/(?:c|channel)/(?P<uid>[a-zA-z0-9-]+)(?:[^/]*)/?$",
self._channel_entries_by_id,
),
)
url_path = urlparse(url).path
for regex, func in dispatch:
match = re.match(regex, url_path)
if not match:
continue
return func(**match.groupdict())
raise UnsupportedError(url) | ytdlp-youmaker | /ytdlp_youmaker-2023.1.10-py3-none-any.whl/yt_dlp_plugins/extractor/youmaker.py | youmaker.py |
The MIT License (MIT)
Copyright © 2021 Stift007
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE | ytdlraw | /ytdlraw-0.2.1.tar.gz/ytdlraw-0.2.1/LICENSE.md | LICENSE.md |
.. :changelog:
History
-------
0.1.13
~~~~~~~~~~~~~~~~~~
Official release of the ``ytdownloader`` Python package.
- Added support for Python 2.7. Resolved an issue with pickling class methods raised by the multiprocessing library.
0.1.12
~~~~~~~~~~~~~~~~~~
Official release of the ``ytdownloader`` Python package.
- Added proper handling of directory paths for the ``ytdownloader`` package.
- Added support for downloading both videos and playlists for a directory (in earlier versions it was either
videos or playlists).
0.1.11 (25 Apr 2018)
~~~~~~~~~~~~~~~~~~~~
Official release of the ``ytdownloader`` Python package.
- Added YAML/JSON configuration support for the ``ytdownloader`` package.
- First release on PyPI. | ytdownloader | /ytdownloader-0.1.14.tar.gz/ytdownloader-0.1.14/HISTORY.rst | HISTORY.rst |
YoutubeDownloader
=================
**YoutubeDownloader** is an automated service to download multiple
youtube videos at a time.
About
-----
**YoutubeDownloader** is written in ``Python``. At its heart it uses
``Asynchronous Python Multiprocessing``, which lets users download
more than one video at a time.
**YoutubeDownloader** requires a configuration file in YAML or JSON
format. This configuration file gives structure and usability to the
service: it defines which **videos/playlists** need to be downloaded and
how they are going to be stored.
Configuration Syntax
--------------------
**YoutubeDownloader** supports YAML / JSON configuration formats, with
YAML preferred over JSON. Below are snippets of sample configurations in
YAML and JSON format.
::
settings:
process: 2
download:
mostlyinsane:
dirname: '../mostlyinsane'
videos:
- 'https://www.youtube.com/watch?v=vcKPjDUc5EQ'
trippling:
dirname: 'trippling'
playlists: 'https://www.youtube.com/watch?list=PLTB0eCoUXEraZe3d7fJRdB-znE5D0cMZ7'
official-ceogiri:
dirname: 'official-ceogiri'
playlists:
- 'https://www.youtube.com/watch?list=PLTB0eCoUXEraZe3d7fJRdB-znE5D0cMZ7'
::
{
"settings": {
"process": 5
},
"download": {
"mostlyinsane": {
"dirname": "../mostlyinsane",
"videos": [
"https://www.youtube.com/watch?v=vcKPjDUc5EQ"
]
},
"trippling": {
"dirname": "trippling",
"playlists": "https://www.youtube.com/watch?list=PLTB0eCoUXEraZe3d7fJRdB-znE5D0cMZ7"
},
"official-ceogiri": {
"dirname": "official-ceogiri",
"playlists": [
"https://www.youtube.com/watch?list=PLTB0eCoUXEraZe3d7fJRdB-znE5D0cMZ7"
]
}
}
}
``settings`` defines service level variables.

- ``process`` forces **YoutubeDownloader** to use ``Asynchronous Python Multiprocessing`` and tells how many processes should be deployed to download **videos/playlists** at a time.
``download`` defines what **videos/playlists** to download. It tags
**dirnames** with **videos/playlists** internally and stores the
downloaded **videos/playlists** in the respective **directory**.
- ``dirname`` **relative / absolute directory path** to store videos
in.
- ``videos`` **single / array of YouTube video links** to download.
- ``playlists`` **single / array of YouTube playlist links** to
download.
Install
-------
This is a pure-Python package built for Python 2.6+ and Python 3.0+. To
set up:
::
sudo pip install ytdownloader
Options
-------
::
ytdownloader --help
- ``config`` specifies the location of the configuration file for
**YoutubeDownloader**. If omitted, **YoutubeDownloader** looks in
the current directory for the configuration file.
- ``--version`` shows the current version of **YoutubeDownloader**.
- ``--about`` shows the about text for **YoutubeDownloader**.
Usage
-----
::
ytdownloader --version // latest version of ytdownloader
::
ytdownloader --about // about text for ytdownloader
::
ytdownloader // start the ytdownloader and search config file in current directory
::
ytdownloader docs/config.yaml // start the ytdownloader and use docs/config.yaml as config file
| ytdownloader | /ytdownloader-0.1.14.tar.gz/ytdownloader-0.1.14/README.rst | README.rst |
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/mukultaneja/YoutubeDownloader/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug"
is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "feature"
is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
ytdownloader could always use more documentation, whether as part of the
official ytdownloader docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/mukultaneja/YoutubeDownloader/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
``ytdownloader`` runs on Python 2.7+ and 3+ on any OS. To set up the development
environment:
1. Fork the `YoutubeDownloader repo <https://github.com/mukultaneja/YoutubeDownloader>`__
2. Clone your fork locally::
git clone [email protected]:your_user_id/YoutubeDownloader.git
3. Install your local copy into a virtualenv. If you have `virtualenvwrapper <http://virtualenvwrapper.readthedocs.org/en/latest/install.html>`__ installed, this is how you set up your fork for local development::
$ mkvirtualenv YoutubeDownloader
$ cd YoutubeDownloader/
$ python setup.py develop
4. Create a branch for local development::
git checkout -b <branch-name>
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8.
6. Commit your changes and push your branch to GitHub. Then send a pull
request::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push --set-upstream origin <branch-name>
7. To delete your branch::
git branch -d <branch-name>
git push origin --delete <branch-name>
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
2. The pull request should work for Python 2.7 and 3+.
Release
-------
1. Update ``__VERSION__ = x.x.x`` in `ytdownloader <https://github.com/mukultaneja/YoutubeDownloader/blob/master/ytdownloader/__init__.py>`__
2. Update ``HISTORY.rst`` with changes
3. Commit, create an annotated tag and push the code::
git commit .
git tag -a vx.x.x
git push --follow-tags
4. To `release to PyPi`_, run::
make clean
python setup.py sdist bdist_wheel --universal
twine upload dist/*
.. _release to PyPi: https://packaging.python.org/en/latest/distributing.html | ytdownloader | /ytdownloader-0.1.14.tar.gz/ytdownloader-0.1.14/CONTRIBUTING.rst | CONTRIBUTING.rst |
from apiclient.discovery import build
import pandas as pd
class YoutubeVideo(object):
def __init__(self, api_key='YOUR KEY', channel_id='UC_eOodxvwS_H7x2uLQa-svw'):
self.api_key = api_key
self.channel_id= channel_id
self.temp = []
self.data = self.__validate()
self.__base_url = "https://www.youtube.com/watch?v="
def __validate(self):
"""
Private Function User dosent have Access
:return:
"""
try:
self.youtube = build('youtube', 'v3', developerKey=self.api_key)
self.videos = self.__generator()
return self.videos
except Exception:
print("API Key or Channel ID was incorrect ")
def __generator(self):
"""
Private Function User dosent have Access
:return:
"""
# get Uploads playlist id
res = self.youtube.channels().list(id=self.channel_id,
part='contentDetails').execute()
playlist_id = res['items'][0]['contentDetails']['relatedPlaylists']['uploads']
# page through the uploads playlist, accumulating all items in self.temp
next_page_token = None
while True:
res = self.youtube.playlistItems().list(playlistId=playlist_id,
part='snippet',
maxResults=50,
pageToken=next_page_token).execute()
self.temp += res['items']
next_page_token = res.get('nextPageToken')
if next_page_token is None:
break
return self.temp
@property
def get_title(self):
return [x['snippet']['title'] for x in self.data]
@property
def get_video_url(self):
return [self.__base_url + x['snippet']['resourceId']['videoId'] for x in self.data]
@property
def get_image(self):
return [x['snippet']['thumbnails']['medium']['url'] for x in self.data]  # thumbnail URLs are already absolute
@property
def get_all(self):
get_title = self.get_title
link = self.get_video_url
image = self.get_image
return [y for y in zip(get_title,link,image)]
@property
def save_excel(self):
get_title = self.get_title
link = self.get_video_url
image = self.get_image
self.df = pd.DataFrame({"Title":get_title,
"link":link,
"Image":image})
self.df.to_excel("Youtube.xlsx")
print("Report Created ")
return True
@property
def save_json(self):
get_title = self.get_title
link = self.get_video_url
image = self.get_image
self.df = pd.DataFrame({"Title":get_title,
"link":link,
"Image":image})
self.df.to_json("Youtube.json")
print("Report Created ") | ytdvideo | /ytdvideo-2.0.0-py3-none-any.whl/ytdvideo.py | ytdvideo.py |
# YTE - A YAML template engine with Python expressions
[](https://yte-template-engine.github.io)
[](https://github.com/yte-template-engine/yte/blob/main/pyproject.toml#L30)


[](https://anaconda.org/conda-forge/yte)
[](https://anaconda.org/conda-forge/yte)
[](https://github.com/conda-forge/yte-feedstock)
YTE is a template engine for the YAML format that utilizes the YAML structure in combination with Python expressions to enable dynamically building YAML documents.
The key idea of YTE is to rely on the YAML structure to enable conditionals, loops and other arbitrary Python expressions to dynamically render YAML files.
Python expressions are thereby declared by prepending them with a `?` anywhere in the YAML.
Any such value will be automatically evaluated by YTE, yielding plain YAML as a result.
Importantly, YTE templates are still valid YAML files (for YAML, the `?` expressions are just strings).
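For illustration, here is a minimal sketch of what such a template might look like (the keys and values are made up for this example; consult the documentation below for the exact syntax of conditionals and loops):

```yaml
# template.yaml -- values prefixed with `?` are evaluated as Python expressions
answer: ?6 * 7
doubled: ?[2 * x for x in range(3)]
```

Rendering this with YTE should yield plain YAML along the lines of:

```yaml
answer: 42
doubled:
  - 0
  - 2
  - 4
```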
Documentation of YTE can be found at https://yte-template-engine.github.io.
## Comparison with other engines
Lots of template engines are available, for example the famous generic [jinja2](https://jinja.palletsprojects.com).
The reasons to generate a YAML specific engine are
1. The YAML syntax can be exploited to simplify template expression syntax, and make it feel less foreign (i.e. fewer special characters for control flow needed) while increasing human readability.
2. Whitespace handling (which is important with YAML since it has a semantic there) becomes unnecessary (e.g. with jinja2, some [tuning](https://radeksprta.eu/posts/control-whitespace-in-ansible-templates) is required to obtain proper YAML rendering).
Of course, YTE is not the first YAML specific template engine.
Others include
* [Yglu](https://yglu.io)
* [Emrichen](https://github.com/con2/emrichen)
The main difference between YTE and these two is that YTE extends YAML with plain Python syntax instead of introducing another specialized language.
Of course, the choice is also a matter of taste.
| yte | /yte-1.5.1.tar.gz/yte-1.5.1/README.md | README.md |
# yter
Version 3.1.1
2023, June 23
Clever, quick iterator functions that make your smile whiter.
This will work with versions of Python 2.6+ and 3.2+.
## Functions
There are many functions that process data from iterators in efficient ways.
* `yany` -- Extended version of the builtin any, test if any values are true
* `yall` -- Extended version of the builtin all, test if all values are true
* `ylen` -- Complete an iterator and get number of values
* `first` -- Get the first value from an iterable
* `last` -- Get the final value from an iterable
* `head` -- Get the first values from an iterable
* `tail` -- Get the last values from an iterable
* `minmax` -- Find the minimum and maximum values from an iterable
* `isiter` -- Test if an object is iterable, but not a string type
* `uniter` -- Efficient copy of non sequence data
* `repeat` -- Efficient lazy copy of non sequence data
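For illustration, a short usage sketch of a few of these functions (the exact signatures and defaults are assumptions based on the descriptions above; see the documentation link below for details):

```python
import yter

data = [3, 1, 4, 1, 5, 9, 2, 6]

# consume a generator and count its values without building a list
count = yter.ylen(x for x in data if x > 2)

# first value from an iterator, and the (min, max) pair in a single pass
first_value = yter.first(iter(data))
lo, hi = yter.minmax(data)
```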
## Iterators
There are several iterators that wrap an existing iterator and process it's output.
* `call` -- Iterator that works with mixed callable types
* `percent` -- Iterator that skips a percentage of values
* `flat` -- Iterator of values from a iterable of iterators
* `chunk` -- Iterator of lists with a fixed size from iterable
* `key` -- Iterator of pairs of key result and original values
* `choose` -- Split into iterators for true and false values
* `unique` -- Iterate only the unique values
* `duplicates` -- Iterate only the duplicated values
* `recurse` -- Recurse values from a callable
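Again as a hedged sketch (argument order and the exact results are assumed from the descriptions above):

```python
import yter

# split an iterable into fixed-size lists (the last chunk may be shorter)
chunks = list(yter.chunk(range(7), 3))

# flatten one level of nesting, then keep only unique values
flattened = list(yter.flat([[1, 2], [3, 4]]))
seen_once = list(yter.unique("abracadabra"))
```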
## Keys
Utility functions that are useful to use as a key argument
* `format` -- Create a function that formats given values into strings
* `numeric` -- Split a string into string and integer sections
* `getter` -- Shorthand for attrgetter, itemgetter, and methodcaller operators
[More documentation](https://gitlab.com/shredwheat/yter/blob/master/docs/docs/index.md) found in the repository.
| yter | /yter-3.1.1.tar.gz/yter-3.1.1/README.md | README.md |
# Instruction
A sample Python project
=======================
A sample project that exists as an aid to the `Python Packaging User Guide
<https://packaging.python.org>`_'s `Tutorial on Packaging and Distributing
Projects <https://packaging.python.org/en/latest/distributing.html>`_.
This projects does not aim to cover best practices for Python project
development as a whole. For example, it does not provide guidance or tool
recommendations for version control, documentation, or testing.
`The source for this project is available here
<https://github.com/pypa/sampleproject>`_.
Most of the configuration for a Python project is done in the ``setup.py``
file, an example of which is included in this project. You should edit this
file accordingly to adapt this sample project to your needs.
----
This is the README file for the project.
The file should use UTF-8 encoding and be written using `reStructuredText
<http://docutils.sourceforge.net/rst.html>`_. It
will be used to generate the project webpage on PyPI and will be displayed as
the project homepage on common code-hosting services, and should be written for
that purpose.
Typical contents for this file would include an overview of the project, basic
usage examples, etc. Generally, including the project changelog in here is not
a good idea, although a simple "What's New" section for the most recent version
may be appropriate.
Usage
=====
install::
pip install ytest
run::
ytest --help
ytest http://ytest.com -q 2
output::
Description...
Namespace(config=0, name='YT', quality=0, store=None, url=['http://ytest', 'ylanted.com'], verbose=0
)
http://ytest
| ytest | /ytest-0.1.4.tar.gz/ytest-0.1.4/README.rst | README.rst |
The MIT License (MIT)
Copyright (c) 2022 Johannes Fischer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
| ytframedownloader | /ytframedownloader-0.10-py3-none-any.whl/ytframedownloader-0.10.dist-info/LICENSE.rst | LICENSE.rst |
from __future__ import unicode_literals
import youtube_dl
from pycaption import WebVTTReader
from os import remove
import re
import hashlib
from ytcc.storage import Storage
from colorama import Fore, Style
class Download():
urls = []
search_query = ''
regex = False
include_links = False
def __init__(self, args: dict, opts: dict = {}) -> None:
self.opts = {
'skip_download': True,
'writeautomaticsub': True,
'no_warnings': not args['v'],
'quiet': not args['v'],
}
self.urls = args['urls']
if args['e']:
self.regex = True
self.search_query = re.compile(args['pattern'])
else:
self.search_query = args['pattern']
self.opts.update(opts)
if args.get('links'):
self.include_links = True
def get_captions(self) -> str:
output = ''
for url in self.urls:
result = self.get_result(url)
if result != 0:
raise Exception(
'Unable to download and extract captions: {0}'.format(result))
storage = Storage(url)
file_path = storage.get_file_path()
try:
with open(file_path) as f:
output += self.get_captions_from_output(f.read(), url)
storage.remove_file()
except FileNotFoundError:
if len(self.urls) == 1:
raise NoCaptionsException("no captions found.")
else:
print("WARNING: no captions found for {}".format(url))
# remove final newline
if len(output) > 0 and output[-1] == '\n':
output = output[:-1]
return output
def get_result(self, video_id: str) -> int:
self.opts['outtmpl'] = 'subtitle_' + \
hashlib.md5(str(video_id).encode('utf-8')).hexdigest()
with youtube_dl.YoutubeDL(self.opts) as ydl:
try:
return ydl.download([video_id])
except youtube_dl.utils.DownloadError as err:
raise DownloadException(
"Unable to download captions: {0}".format(str(err)))
except youtube_dl.utils.ExtractorError as err:
raise DownloadException(
"Unable to extract captions: {0}".format(str(err)))
except Exception as err:
raise DownloadException(
"Unknown exception downloading and extracting captions: {0}".format(
str(err)))
def get_captions_from_output(self, output: str, url: str) -> str:
reader = WebVTTReader()
captions = []
for caption in reader.read(output).get_captions('en-US'):
stripped = self.remove_time_from_caption(
url, str(caption).replace(r'\n', " "))
stripped += "\n"
captions.append(stripped)
if self.search_query == '':
return ''.join(item for item in captions)
return self.process_captions(captions, url)
def get_time_url(self, url, time_str):
h, m, s = time_str.split(':')
seconds = str(int(h) * 3600 + int(m) * 60 + int(s))
return url + '&t=' + str(seconds) + 's'
def process_captions(self, captions, url):
temp_final = ''
# if we have multiple urls, print the URL at the beginning
if len(self.urls) > 1:
temp_final = url + '\n'
i = -1
for caption in captions:
i += 1
stripped = caption.lower()
# temporarily remove time prefix via slicing (the time prefix is
# stable)
prefix = stripped[0:32]
stripped = stripped[32:]
# remove duplicate entries
if self.regex:
l = self.search_query.findall(stripped)
if len(l) > 0:
for match in l:
if Fore.RED + match + Style.RESET_ALL not in stripped:
stripped = stripped.replace(
match, Fore.RED + match + Style.RESET_ALL)
stripped = stripped.replace("'", "").strip()
stripped = prefix + stripped
if self.include_links:
start_time = prefix[1:9]
time_url = self.get_time_url(url, start_time)
stripped = stripped.rstrip() + ' (' + time_url + ')'
temp_final += stripped + '\n'
elif self.search_query in stripped:
# It's possible that we have duplicate entries, such as:
# [00:45:15.960 --> 00:45:15.970] will do this topological sort is what'
# [00:45:15.970 --> 00:45:20.430] will do this topological sort is what the selvam is usually called topological'
# so skip the original duplicate if we find a match like this. We trim and ignore quotes to avoid
# whitespace and quotes from stopping what would otherwise be a
# match
if i < len(captions) - 1 and stripped.strip().replace("'",
"").replace('"',
'') in str(captions[i + 1]).strip().replace("'",
"").replace('"',
''):
continue
stripped = stripped.replace("'", "").strip()
stripped = stripped.replace(
self.search_query,
Fore.RED +
self.search_query +
Style.RESET_ALL)
stripped = prefix + stripped
if self.include_links:
start_time = prefix[1:9]
time_url = self.get_time_url(url, start_time)
stripped = stripped.rstrip() + ' (' + time_url + ')'
temp_final += stripped + '\n'
return temp_final
def remove_time_from_caption(self, video_id: str, caption: str) -> str:
caption = re.sub(
r"(\d{2}:\d{2}:\d{2}.\d{3} --> \d{2}:\d{2}:\d{2}.\d{3})",
r"[\1]",
caption,
flags=re.DOTALL)
# remove first char from string (will be a quote)
return caption[1:]
class DownloadException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
class NoCaptionsException(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs) | ytgrep | /ytgrep-0.5.0-py3-none-any.whl/ytcc/download.py | download.py |
# YTKD
### ytkd-api is sponsored by:
<a href="https://www.developingnow.com/"><img src="https://github.com/alinisarhaider/ytkd_api/blob/master/developingco_logo.png?raw=true"/></a>
## Description
A YouTube key word detection API that takes in a video URL and key words and outputs each key word along with its time of occurrence in the video.
## Installation
Download using pip via pypi.
```bash
$ pip install ytkd-api
```
## Getting started
Import YTKD:
```python
>>> from ytkd_api import YTKD
```
Assign data.
```python
>>> url = 'https://www.youtube.com/watch?v=vW9edcLqND0'
>>> keywords = 'and, to'
```
Create an object of YTKD.
```python
>>> ytkd = YTKD(url=url, keywords=keywords)
```
Get the expected processing time.
```python
>>> ytkd.get_expected_time() # returns the expected time in minutes e.g. 5
```
Get the results for the given keywords.
```python
>>> ytkd.get_results() # returns the results in form of a dictionary of type {str: list}
``` | ytkd-api | /ytkd_api-0.0.3.tar.gz/ytkd_api-0.0.3/README.md | README.md |
# ytkubevault
ytkubevault is a light wrapper of abilities to read secrets
from HashiCorp Vault running in Kubernetes.
When the microservice needs to fetch the secret value from
Vault, it has to read a token from its containing pod first.
Then this token is used to communicate with Vault in order to
obtain a second token. Your service uses the second token to
get the secrets. ytkubevault simplifies this process with one
function `get_secret_or_env(key, default)`, which first tries
to obtain the secret from Vault and, if that doesn't succeed,
reads it from the environment. A default value can be provided
as the last resort.
This is especially convenient when you are developing locally,
or the application is being built in a CI/CD pipeline where
the first token is not available.
## Install
```shell
pip install ytkubevault
```
## Usage
First define the following environment variables:
* VAULT_ENABLED
* VAULT_ROLE
* VAULT_URL
* VAULT_SECRETS_PATH
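For local development, these might be exported in the shell before starting the service (the values below are placeholders, not real settings):
```shell
export VAULT_ENABLED="true"
export VAULT_ROLE="my-service-role"                 # placeholder
export VAULT_URL="https://vault.example.com:8200"   # placeholder
export VAULT_SECRETS_PATH="secret/data/my-service"  # placeholder
```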
By default, `VAULT_ENABLED` is `"false"`. To enable reading from Vault,
set it to be `"true"`, case-insensitive. And then,
```python
from ytkubevault import get_secret_or_env
db_password = get_secret_or_env("DATABASE_PASSWORD")
```
Since Version 0.2.0, a `VaultClient` is added, and you can explicitly create
such a client:
```python
from ytkubevault import VaultClient
vault_client = VaultClient()
# login first
try:
vault_client.login()
except Exception as e:
print(f"Failed to login: {e}")
# Then you can do encryption, for example:
vault_client.encrypt(encrypt_key="some_key", plaintext="my_secret_message")
```
The old functions now use an implicitly created global `VaultClient`. Note that
`VaultClient` is not multithread-safe.
## Fetching secrets from outside the cluster
To be able to fetch secrets from outside the Kubernetes cluster, you need to install
the package with
```shell
pip install 'ytkubevault[dev]'
```
This will also install the `kubernetes` package, which allows us to get the service account
token. Additionally, 4 environment variables need to be set:
* VAULT_DEV_REMOTE_MODE: this needs to be `true`, which is `false` by default
* VAULT_DEV_REMOTE_CLUSTER: the cluster string you want to connect to
* VAULT_DEV_REMOTE_NAMESPACE: the namespace the service is in
* VAULT_DEV_REMOTE_SERVICE_ACCOUNT: the service account name of the service | ytkubevault | /ytkubevault-0.2.0.tar.gz/ytkubevault-0.2.0/README.md | README.md |
# YTL - YAT Trailer Loading Package
Simple package for turning a list of freight pieces into a trailer plan and associated metrics helpful for LTL (Less than Truckload) and Partial planning and pricing. The package includes an out-of-the-box trailer load optimization allowing for flexible trailer dimension options, stacking/rotating of pieces (which can be allowed or disallowed), and handling of overweight shipments/pieces. A detailed load plan is returned, as well as several measurements like linear feet, actual volume, cubed volume, and effective trailer volume occupancy of the load plan.
The aim of the optimization is to load the provided shipments/pieces into a trailer of the specified dimensions with minimal linear foot occupancy of the trailer. The default configuration makes it easy to plug and play to use in your own Python development or in an internally hosted API (see accompanying `yat-trailer-loading-api` for a simple Python Flask implementation).
Beyond the standard out-of-the-box style implementation, the package is readily available for development of additional optimization algorithms. The optimization in this package is simulation-based, leveraging Python Classes that model Trailers, Shipments, and Pieces as well as the placement and movement of Pieces and Shipments within a Trailer. For more information on adding your own optimization, see additional details in the `Simulation Model Description` section below.
## Install
```
pip install ytl
```
## Usage
### **Setup**
Import trailer optimization service (this version is intended to support an API)
```python
from ytl import optimize_trailer_load_plan_wrapper
```
Specify shipment piece list (dimensions assumed to be in inches, weight in pounds). Packing type must be `PALLET` or `BOX` (pallets are not allowed to be stacked on boxes, even if the pieces involved allow stacking).
```python
shipment_list = [
{
"length": 40,
"width": 42,
"height": 35,
"weight": 225,
"packing": "PALLET",
"stack_limit": 2,
"num_pieces": 5
},
{
"length": 44,
"width": 40,
"height": 30,
"weight": 200,
"packing": "BOX",
"stack_limit": 3,
"num_pieces": 8
}
]
```
### **Trailer Load Optimization with Pre-Defined Trailer Type**
Define the request data and call the optimization function. This example sets the equipment type to a typical 53' dry-van trailer and allows shipment pieces to be rotated.
```python
request_data = {
'equipment_code' : 'DV_53',
'shipment_list' : shipment_list,
'allow_rotations' : True
}
status_code,response_data = optimize_trailer_load_plan_wrapper(request_data=request_data)
```
The `status_code` is intended to be the status associated with an API call and the `response_data` is the response body. You can interrogate the response data for a large amount of detail about the resulting optimization. Below are a few examples showing the linear feet occupied in the trailer and a detailed list of the load plan.
```python
print('status: {}'.format(status_code))
print('linear feet: {:.1f} ({:.0f}% of Trailer)'.format(response_data.get('linear_feet'),100*response_data.get('linear_feet_portion_of_trailer')))
print('Load Plan')
print('-'*100)
print('-'*100)
print('{:34}{:19}{:21}{:11}{:20}{}'.format('Desc','Location','Size','Weight','Orientation','Packing'))
print('-'*100)
for k,v in response_data['load_order'].items():
print(
'{name:30} {x:3.0f} x {y:3.0f} x {z:3.0f} {l:3.0f}" x {w:3.0f}" x {h:3.0f}" {weight:5,.0f} Lbs {rotated:15} {packing:12}'.format(
name=v.get('name'),
x=v.get('position')[0],
y=v.get('position')[1],
z=v.get('position')[2],
l=v.get('piece_length'),
w=v.get('piece_width'),
h=v.get('piece_height'),
weight=v.get('piece_weight'),
rotated='Rotated' if v.get('piece_is_rotated') else 'Not Rotated',
packing=v.get('piece_packing')
)
)
```
The options for predefined equipment types are available in `ytl.STANDARD_TRAILER_DIMS`. The `code` values are to be used in the optimization function. The inner length, inner width, inner height, and max weight attributes are available there for additional information. The `inner_height` field for open top trailers is set to be the estimated maximum freight height for typical bridge clearances.
```python
from ytl import STANDARD_TRAILER_DIMS
import json
print(json.dumps(STANDARD_TRAILER_DIMS,indent=2))
```
### **Trailer Load Optimization with Specified Trailer Dimensions**
```python
trailer_dims = {
"inner_width": 98.5,
"inner_length": 630,
"inner_height": 108,
"max_weight": 42500
}
request_data = {
'trailer_dims' : trailer_dims,
'shipment_list' : shipment_list,
'allow_rotations' : False
}
status_code,response_data = optimize_trailer_load_plan_wrapper(request_data=request_data)
```
### **Trailer Load Optimization with Altered Optimization Parameters**
You can alter the optimization as well by specifying router keys for the piece and shipment arrangement algorithms.
```python
request_data = {
'equipment_code' : 'DV_53',
'shipment_list' : shipment_list,
'allow_rotations' : True,
'piece_arrangement_algorithm' : 'NAIVE',
'shipment_optimization_ls' : [
{
'algorithm' : 'GREEDY_LOAD',
'max_iter' : 5,
'timeout' : 2.5,
}
]
}
status_code,response_data = optimize_trailer_load_plan_wrapper(request_data=request_data)
```
The piece arrangement algorithm is a single-use optimization that arranges (potentially stacking) pieces into shipments. The shipment arrangement algorithm is an iterative optimization that attempts to find the best way (best meaning minimal linear feet occupied) of loading those shipments into the trailer. Since the shipment arrangement is an iterated optimization, you can provide a list of algorithm parameters to use. See `ytl.optimizer_functions.PIECE_ARRANGEMENT_ROUTER` for `piece_arrangement_algorithm` options and `ytl.optimizer_functions.SHIPMENT_ARRANGEMENT_ROUTER` for `algorithm` options in the `shipment_optimization_ls` parameter. Also see `ytl.defaults.DEFAULT_PIECE_ARRANGEMENT_ALGORITHM` and `ytl.defaults.DEFAULT_SHIPMENT_ARRANGEMENT_ALGORITHM` for default values.
```python
import ytl
import json
# Piece arrangement algorithm options
print(json.dumps([b for a,b in ytl.optimizer_functions.PIECE_ARRANGEMENT_ROUTER.values()],indent=2))
# Piece arrangement algorithm default
print(ytl.defaults.DEFAULT_PIECE_ARRANGEMENT_ALGORITHM)
# Shipment arrangement algorithm options
print(json.dumps([b for a,b in ytl.optimizer_functions.SHIPMENT_ARRANGEMENT_ROUTER.values()],indent=2))
# Shipment arrangement algorithm default
print(json.dumps(ytl.defaults.DEFAULT_SHIPMENT_ARRANGEMENT_ALGORITHM,indent=2))
```
## Simulation Model Description
Given the complexity of the trailer loading optimization problem, we are well-advised to seek an approximate optimization approach rather than an exact optimization that may be very costly and time-consuming. This package includes an object-oriented framework for simulation-based optimizers, which can be used as the underpinning for approaches like simulated annealing/MCMC style methods, genetic algorithms, and other heuristic/stochastic/dynamical systems optimization schemes.
The simulation framework is composed of 3 types of objects: Trailers, Shipments, and Pieces.
- **Piece** (`ytl.logistics_objects.Piece`): Pieces are indivisible objects intended to be loaded into a trailer. They are assumed to be rectangular prisms with length, width, and height dimensions, as well as weight, stackable, and rotation-allowed attributes. Only rotations in the x-y plane are allowed (pieces may be turned but not allowed to be tipped on their side or set upside down).
- **Shipment** (`ytl.logistics_objects.Shipment`): Shipments are made up of Piece objects and store information about the relative location of Pieces arranged within its boundaries. Shipments have calculated length, width, height, and weight attributes based on the contained pieces and their relative placement within the Shipment.
- **Trailer** (`ytl.logistics_objects.Trailer`): Trailer objects are made up of Shipment objects and store information about the relative location of Shipments arranged within their boundaries. Trailers have inner length, inner width, inner height, and maximum weight parameters that dictate how Shipment objects can be arranged inside of them. Shipment objects are not allowed to be stacked inside of a trailer - any stacking of shipping units must be treated as stacking of Piece objects to make up a Shipment.
These definitions set up a natural two-stage optimization framework. Based on user inputs, create a collection of Piece objects and a Trailer of the appropriate dimensions. The first stage of the optimization is to arrange the Piece objects into Shipments, and the second is to arrange the Shipment objects in the Trailer object. These two stages are implemented in `ytl/optimizer_functions/__init__.py`:
- **Piece Arrangement Optimization Router** (`ytl.optimizer_functions.optimize_pieces_arrangement`): This function takes as input parameters a list of Piece objects and an algorithm specification, and it returns a list of Shipment objects containing all provided pieces. This optimization is built to be run once to generate a list of Shipment objects from provided Piece objects (not built to be run iteratively).
- **Shipment Arrangement Optimization Router** (`ytl.optimizer_functions.optimize_shipment_arrangement`): This function takes as parameters a Trailer object (assumed to have Shipment objects already defined) and algorithm `max_iter`/`timeout` parameters to control optimization runtime. It returns `None`, but makes updates to the Trailer object in place that model rearrangement of shipments within the trailer. These optimizers are intended to be run iteratively on a Trailer object in the process of a simulation-based optimization. Loss function measurements are made based on the state of the Trailer object; in our case the linear foot occupancy of the trailer is simply used as the loss function.
The optimization is implemented in `ytl.services.trailer_load.optimize_trailer_load_plan`, which manages the creation of Piece/Trailer objects from user inputs and both stages (Piece and Shipment arrangement) of the optimization. The options used for each stage of the optimization can be specified in the user request, otherwise the defaults set in `ytl.defaults` are used.
Further development can be done by adding additional optimizers to `ytl.optimizer_functions.optimize_pieces_arrangement` and/or `ytl.optimizer_functions.optimize_shipment_arrangement`, altering the goal of the optimization by using another loss function instead of linear feet, or altering the valid Piece arrangement methodology inside the Piece/Shipment objects.
For examples of how to generate and work with Piece, Shipment, and Trailer objects, start with `demo/demo_trailer_load.py` and `ytl.services.trailer_load.optimize_trailer_load_plan`.
## Reference
Portions of this package make use of an optimization presented by Dube and Kanavalty in the conference paper cited below. There is a Python implementation of this algorithm available on [PyPi](https://pypi.org/project/py3dbp/), which is a derivative work of a Go implementation available on [GitHub](https://github.com/bom-d-van/binpacking) (The article by Dube and Kanavalty is also available in this GitHub repository). The `py3dbp` Python implementation, with minor variations to suit our purposes, is in the `ytl.py3dbp` module of this package and leveraged in portions of the trailer load optimization services.
> E. Dube and L. Kanavalty, "Optimizing Three-Dimensional Bin Packing Through Simulation", Proceedings of the Sixth IASTED International Conference on Modelling, Simulation, and Optimization, pp. 1-7, September 11-13, 2006, Gaborone, Botswana.
| ytl | /ytl-1.0.1.tar.gz/ytl-1.0.1/README.md | README.md |
MIT License
Copyright (c) 2023 Chris Ismael / ChatGPT
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. | ytlisten | /ytlisten-0.24.tar.gz/ytlisten-0.24/LICENSE.md | LICENSE.md |
# ytlisten
`ytlisten` is a command-line based YouTube audio player that allows you to search and play YouTube videos as audio. This is a Python application that uses the YouTube API to search for and retrieve videos and uses the VLC media player to play the audio.
## Installation
1. Install the VLC app
```
# Linux
sudo apt-get install vlc
# Mac
brew install vlc
```
Before installing ytlisten, you must first install the VLC media player. You can download it from the official VLC website at https://www.videolan.org/vlc/#download. Make sure to install the version that corresponds to your operating system.
2. Get a YouTube API Key from Google:
- Go to the [Google Developers Console](https://console.developers.google.com).
- Create a new project.
- In the left sidebar, click on "APIs & Services" and then "Library".
- Search for "YouTube Data API v3" and click on it.
- Click the "Enable" button.
- In the left sidebar, click on "Credentials".
- Click the "Create credentials" button and select "API key".
- Copy the generated API key.
- In your terminal, create a new environment variable called `YT_API_KEY` and set its value to your API key:
```
export YT_API_KEY="YOUR_API_KEY_HERE"
```
3. Install `ytlisten` using pip
```
pip install ytlisten
```
## Usage
Once installed, you can use the ytlisten command to search for and play audio tracks from Youtube. Here's an example:
```
ytlisten despacito
```
This will search Youtube for the keyword "despacito" and play the audio from the first video in the search results. You can also specify a Youtube video URL directly:
```
ytlisten https://www.youtube.com/watch?v=kJQP7kiw5Fk
```
That's it! You should now be able to run ytlisten and search for and listen to YouTube videos from the command line.
## Limitations
Please note that `ytlisten` is not an official YouTube client and is not affiliated with YouTube in any way. It's a simple command-line tool that uses the YouTube Data API to search for and play YouTube videos as audio files.
Also, keep in mind that the YouTube Data API has certain usage limits and quota restrictions. If you exceed these limits, your API key may be temporarily or permanently blocked by Google. Please refer to the official documentation for more information.
## Contributions / Development
Contributions are welcome!
```
python setup.py sdist bdist_wheel
# https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#create-an-account
twine upload dist/*
```
## License
[MIT License](LICENSE.md) | ytlisten | /ytlisten-0.24.tar.gz/ytlisten-0.24/README.md | README.md |
import os
import sys
import tty
import time
import termios
import argparse
import vlc
import pytube
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
# Set up the YouTube Data API v3 client
API_KEY = os.environ["YT_API_KEY"]
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
youtube = build(YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION, developerKey=API_KEY)
def get_video_urls(query, count=1):
youtube = build(YOUTUBE_API_SERVICE_NAME,
YOUTUBE_API_VERSION, developerKey=API_KEY)
request = youtube.search().list(
part="id",
q=query,
type="video",
videoDefinition="high",
maxResults=count
)
response = request.execute()
video_id = response['items'][0]['id']['videoId']
return f"https://www.youtube.com/watch?v={video_id}"
def get_stream(video_url):
video = pytube.YouTube(video_url)
print(f'[{video.title}]')
audio_stream = video.streams.filter(only_audio=True).first()
return audio_stream
def get_player(audio_stream):
# create a new instance of the vlc module with the --no-xlib argument
args = ["--no-xlib", "--quiet", "--no-video"]
instance = vlc.Instance(args)
# create a media player with the new instance
player = instance.media_player_new()
# create a media object with the audio stream URL
media = instance.media_new(audio_stream.url)
player.set_media(media)
return player
def main():
parser = argparse.ArgumentParser(
description='Listen to audio of YouTube videos from the command line.')
parser.add_argument('query', nargs='+', help='search query for YouTube')
args = parser.parse_args()
# Get the search query from the command line arguments
query = ' '.join(args.query).strip()
# Save the terminal settings
old_settings = termios.tcgetattr(sys.stdin)
try:
url = get_video_urls(query)
audio_stream = get_stream(url)
player = get_player(audio_stream)
player.play()
# Wait for the media to finish playing
player_event_manager = player.event_manager()
player_event_manager.event_attach(
vlc.EventType.MediaPlayerEndReached, lambda _: player.stop())
# Set the terminal to raw mode
tty.setraw(sys.stdin.fileno())
print('Press SPACE to pause. ENTER to quit.')
while True:
# Wait for 500 milliseconds between keypress checks
time.sleep(0.5)
# Wait for a single keypress
key = sys.stdin.read(1)
if key == ' ':
player.pause() # Pause the playback
elif key == '\r':
player.stop() # Stop the playback
break
except IndexError as e:
print("No media found. Try tweaking your search")
except KeyboardInterrupt as e:
# Handle keyboard interrupt gracefully
player.stop()
print("\nAudio playback stopped by the user.")
except HttpError as e:
print(f'An error occurred: {e}')
except vlc.VLCException as e:
print('Error', e)
finally:
# Restore the terminal settings
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
if __name__ == "__main__":
main() | ytlisten | /ytlisten-0.24.tar.gz/ytlisten-0.24/ytlisten_proj/ytlisten.py | ytlisten.py |
ytlog - log module for humans
============================
We use logging everyday, bug it's easy to forget how to use buildin
logging module of Python, because the configuration looks complicated.
So I copy log.py module from [Tornado(github)](https://github.com/facebook/tornado)
pip install ytlog
How to use
----------
from ytlog import get_logger
log = get_logger({"name": "app"})
log.info("Mission starts.")
Then we will see logging in the console and in a log file under the directory /data/logs/

So easy!
If you want to change log file path, just add arg "dir"
e.g.
log = get_logger({"name": "app", "dir": "./"})
| ytlog | /ytlog-0.6.tar.gz/ytlog-0.6/README.md | README.md |
from pathlib import Path
import re
from sh import cd, ErrorReturnCode
from ytmusicapi import YTMusic
from ytm_ll_dl.slugify import slugify
from ytm_ll_dl.index_helpers import Index, IndexStatus
from ytm_ll_dl.bash import bash
import click
@click.command()
@click.option(
'--output',
required=True,
type=Path,
help='Output directory.'
)
@click.option(
'--limit',
default=999999,
type=int,
help='Number of tracks to get from the top.'
)
@click.option(
'--skip',
default=0,
type=int,
help='Skip tracks from the bottom.'
)
def main(
output: Path,
limit: int,
skip: int
):
"""Download liked music from YTM"""
for bin in ["curl", "ffmpeg"]:
if bash(f"command -v '{bin}' || echo 'None'").strip() == 'None':
print(f"Program `{bin}` not found but required. Please install it.")
exit(1)
data_dir = output
# Create data directory
bash(f"mkdir -p {str(data_dir)}")
index = Index(data_dir)
auth_headers = data_dir / "./.headers_auth.json"
if not auth_headers.exists():
YTMusic.setup(filepath=str(auth_headers))
ytm = YTMusic(str(auth_headers))
print("Getting liked music...")
tracks = ytm.get_liked_songs(limit)["tracks"]
print(f"Got {len(tracks)} tracks")
print("")
i = 0
for track in reversed(tracks):
i += 1
if skip > 0:
skip -= 1
continue
with cd(data_dir):
prefix = f"[{i}/{len(tracks)}] "
log = lambda x: print(prefix + str(x))
id = track['videoId']
artist = track['artists'][0]['name']
title = track['title']
album = track['album']['name'] if track['album'] else None
mp3 = f"{slugify(artist)}, {slugify(title)}.mp3"
mp3_tmp = ".tmp.mp3"
mp3_tmp2 = ".tmp2.mp3"
status = index.get(id)
if status is not None:
log(f"Video {mp3} already in index ({status.value}), skipping...")
continue
else:
log(f"Downloading {mp3}...")
for file in [mp3, mp3_tmp, mp3_tmp2]:
Path(file).unlink(missing_ok=True)
try:
output = bash(
"yt-dlp "
f"https://www.youtube.com/watch?v={id} "
f"-x -o '{mp3_tmp}'"
)
except ErrorReturnCode as e:
log("yt-dlp failed")
log(e)
index.add(id, IndexStatus.failed)
continue
for line in output.splitlines():
tag = "[ExtractAudio] Destination: "
if line.startswith(tag):
new_dest = line[len(tag):]
bash(f"ffmpeg -i '{new_dest}' '{mp3_tmp}'")
bash(f"rm '{new_dest}'")
break
thumbnail = "current.png"
bash(f"curl '{track['thumbnails'][-1]['url']}' -o {thumbnail}")
title = re.sub('\"', '\\"', title)
artist = re.sub('\"', '\\"', artist)
album = re.sub('\"', '\\"', album) if album else None
title = re.sub('`', '\\`', title)
artist = re.sub('`', '\\`', artist)
album = re.sub('`', '\\`', album) if album else None
bash(
f"ffmpeg -y -i '{mp3_tmp}' -i {thumbnail} " +
"-map 0:0 -map 1:0 -c copy -id3v2_version 3 -metadata:s:v " +
"title=\"Album cover\" -metadata:s:v comment=\"Cover (front)\" " +
f"-metadata title=\"{title}\" " +
f"-metadata artist=\"{artist}\" " +
(f"-metadata album=\"{album}\" " if album else " ") +
f"-c:a libmp3lame {mp3_tmp2}"
)
Path(thumbnail).unlink(missing_ok=True)
Path(mp3_tmp2).rename(mp3)
index.add(id, IndexStatus.ready)
if __name__ == '__main__':
main() | ytm-ll-dl | /ytm_ll_dl-0.1.0-py3-none-any.whl/ytm_ll_dl/main.py | main.py |
<div align="center">
<img src=".github/ytmdl.png">
</div>
<div align="center">
<h1>YouTube Music Downloader</h1>
<h4>Download songs from YouTube by getting the audio from YouTube and the metadata from sources like Itunes, Spotify, Gaana etc.</h4>
</div>
<div align="center" width="60%" height="auto">
<br>
<img src=".github/ytmdl.gif">
</div>
<div align="center">
<br/>
[](https://www.python.org/)<br/><br/>
<img src="https://img.shields.io/badge/Maintained%3F-Yes-blueviolet?style=for-the-badge">
 ](https://apis.deepjyoti30.dev/repostatus/badge?repo=deepjyoti30%2Fytmdl&style=for-the-badge) [](LICENSE.md)   [](https://img.shields.io/badge/dynamic/json?style=for-the-badge&maxAge=86400&label=downloads&query=%24.total_downloads&url=https%3A%2F%2Fapi.pepy.tech%2Fapi%2Fprojects%2Fytmdl) [](http://makeapullrequest.com) [](https://t.me/ytmdl)
<p>
<a href="https://ko-fi.com/deepjyoti30"><img src="https://raw.githubusercontent.com/adi1090x/files/master/other/kofi.png" alt="Support me on ko-fi"></a>
</p>
<br/>
### \[[Web App](#web-app)] \[[Why This?](#why-this)] \[[Support the Project](#support-the-project)] \[[Installation](#installation)] \[[Configuration](#configuration)] \[[WiKi](https://github.com/deepjyoti30/ytmdl/wiki/)]
<br/>
</div>
## Web App
Ytmdl also has a web app; you can try it out [here](https://ytmdl.deepjyoti30.dev/)
## Why this?
This app downloads a song by getting the audio from Youtube sources **using** youtube-dl and then adds song information like
artist name, album name, release date, thumbnail etc. by fetching it from sources like Itunes, Spotify, Gaana and other sources.
**NO**. YoutubeDL doesn't do that. All youtube-dl does is let you download audio from a video that you specify.
**This app is not yet another youtube-dl clone.**
## Support the Project?
Help the development of this project by becoming a backer or a sponsor.
### [Become a Backer](https://opencollective.com/ytmdl#backer)
### [Become a sponsor](https://opencollective.com/ytmdl#sponsor)
If you like my work, consider buying me a coffee or donating. In case you want to become a patron, join my [Patreon](https://www.patreon.com/deepjyoti30)
<p align="left">
<a href="https://www.paypal.me/deepjyoti30" target="_blank"><img alt="undefined" src="https://img.shields.io/badge/paypal-deepjyoti30-blue?style=for-the-badge&logo=paypal"></a>
<a href="https://www.patreon.com/deepjyoti30" target="_blank"><img alt="undefined" src="https://img.shields.io/badge/Patreon-deepjyoti30-orange?style=for-the-badge&logo=patreon"></a>
<a href="https://ko-fi.com/deepjyoti30" target="_blank"><img alt="undefined" src="https://img.shields.io/badge/KoFi-deepjyoti30-red?style=for-the-badge&logo=ko-fi"></a>
</p>
## Requirements
- Python 3.6.1
- ffmpeg
## Installation
- [PyPi](#pypi)
- [Arch Linux](#arch-linux)
- [Gentoo](#gentoo)
- [NixOS](#nixos)
- [Windows](#windows)
- [Manual](#manual)
### PyPI
```console
pip install ytmdl
```
> NOTE: System wide installation requires `sudo`
### Arch Linux
`ytmdl` is available in AUR as `ytmdl`. It can be found [here](https://aur.archlinux.org/packages/ytmdl/)
> NOTE: The git version is available as `ytmdl-git` in AUR.
### Gentoo
`ytmdl` can be installed in Gentoo by the following commands
```console
# First set up src_prepare-overlay (as root)
emerge -av --noreplace app-eselect/eselect-repository
eselect repository enable src_prepare-overlay
emaint sync -r src_prepare-overlay
# Finally emerge ytmdl (as root)
emerge -av --autounmask net-misc/ytmdl
```
Available in **src_prepare-overlay** [here](https://gitlab.com/src_prepare/src_prepare-overlay)
### NixOS
`ytmdl` can be installed using Nix with the command
```console
nix-env -iA nixos.ytmdl
```
### Windows
You need to install `ffmpeg` in order for `ytmdl` to work properly. This can be done by downloading the `ffmpeg` binary from [here](https://ffmpeg.org/download.html). Once downloaded, extract the file and find the `ffmpeg.exe` file. Copy the directory's path and add it to PATH in the following way.
```console
setx path "%path%;C:\your\path\here\"
```
Once `ffmpeg` is installed, install `ytmdl` using the following command
```console
pip install ytmdl --upgrade
```
> NOTE: You'll need to have Python 3.6.1 or newer installed.
Optionally, also install the latest version of `downloader-cli` and `simber` using the following command:
```console
pip install simber downloader-cli --upgrade
```
### Manual
You can manually install `ytmdl` by cloning this repository and running the `setup.py` script.
1. Install `setuptools` if it isn't already:
```console
pip install setuptools
```
1. Clone this repo:
```console
git clone https://github.com/deepjyoti30/ytmdl
```
1. Move into the `ytmdl` directory and run the `setup.py` script:
```console
cd ytmdl
sudo python setup.py install
```
## Usage
```console
usage: ytmdl [-h] [-q] [-o OUTPUT_DIR] [--song SONG-METADATA]
[--choice CHOICE] [--artist ARTIST] [--album ALBUM]
[--disable-metaadd] [--skip-meta] [-m] [--itunes-id ITUNES_ID]
[--spotify-id SPOTIFY_ID] [--disable-sort] [--ask-meta-name]
[--on-meta-error ON_META_ERROR] [--proxy URL] [--url URL]
[--list PATH TO LIST] [--nolocal] [--format FORMAT] [--trim]
[--version] [--keep-chapter-name] [--download-archive FILE]
[--ignore-chapters] [--ytdl-config PATH] [--dont-transcode]
[--pl-start NUMBER] [--pl-end NUMBER] [--pl-items ITEM_SPEC]
[--ignore-errors] [--title-as-name] [--level LEVEL]
[--disable-file] [--list-level]
[SONG_NAME ...]
positional arguments:
SONG_NAME Name of the song to download. Can be an URL to a
playlist as well. It will be automatically recognized.
options:
-h, --help show this help message and exit
-q, --quiet Don't ask the user to select songs if more than one
search result. The first result in each case will be
considered.
-o OUTPUT_DIR, --output-dir OUTPUT_DIR
The location for the song to be downloaded to. When no
argument is passed, the default locations of SONG_DIR
or XDG_MUSIC_DIR are used.
--proxy URL Use the specified HTTP/HTTPS/SOCKS proxy. To enable
SOCKS proxy, specify a proper scheme. For example
socks5://127.0.0.1:1080/. Pass in an empty string
(--proxy "") for direct connection
--url URL Youtube song link.
--list PATH TO LIST Download list of songs. The list should have one song
name in every line.
--nolocal Don't search locally for the song before downloading.
--format FORMAT The format in which the song should be downloaded.
Default is mp3, but can be set in config. Available
options are ['mp3', 'm4a', 'opus']
--trim, -t Trim out the audio from the song. Use underlying
speech and music segmentation engine to determine and
keep only the music in the file. Useful in songs where
there are speeches, noise etc before/after the start
of the song. Default is false.
--version show the program version number and exit
--keep-chapter-name Keep the title extracted from the chapter in order to
search for the metadata. If not passed, the user will
be asked if they'd like to change the title with which
the metadata will be searched.
--download-archive FILE
Skip downloading songs that are present in the passed
file. The songs are matched by using the videoId. All
downloaded song Id's are automatically added to the
file.
--ignore-chapters Ignore chapters if available in the video and treat it
like one video
--ytdl-config PATH Path to the youtube-dl config location or the
directory
--dont-transcode Don't transcode the audio after downloading.
Applicable for OPUS format only. (Default: false)
Metadata:
--song SONG-METADATA The song to search in Metadata. Particularly useful
for songs that have the names in a different language
in YouTube. For Example, greek songs.
--choice CHOICE The choice that the user wants to go for. Usefull to
pass along with --quiet. Choices start at 1
--artist ARTIST The name of the song's artist. Pass it with a song
name.
--album ALBUM The name of the song's album. Pass it with a song
name.
--disable-metaadd Disable addition of passed artist and album keyword to
the youtube search in order to get a more accurate
result. (Default: false)
--skip-meta Skip setting the metadata and just copy the converted
song to the destination directory. '--manual-meta'
will override this option, pass only one of them.
-m, --manual-meta Manually enter song details.
--itunes-id ITUNES_ID
Direct lookup from itunes. If passed, metadata will be
automatically added.
--spotify-id SPOTIFY_ID
Direct lookup for Spotify tracks using the ID. If
passed, metadata will be automatically added.
--disable-sort Disable sorting of the metadata before asking for
input. Useful if the song is in some other language
and/or just a few providers are used.
--ask-meta-name Ask the user to enter a separate name for searching
the metadata (Default: false)
--on-meta-error ON_META_ERROR
What to do if adding the metadata fails for some
reason like lack of metadata or perhaps a network
issue. Options are ['exit', 'skip', 'manual']
Playlist:
--pl-start NUMBER Playlist video to start at (default is 1)
--pl-end NUMBER Playlist video to end at (default is last)
--pl-items ITEM_SPEC Playlist video items to download. Specify indices of
the videos present in the playlist separated by commas
like: '--playlist-items 1, 2, 4, 6' if you want to
download videos indexed 1, 2, 4 and 6. Range can also
be passed like: '--playlist-items 1-3, 5-7' to
download the videos indexed at 1, 2, 3, 5, 6, 7.
--ignore-errors Ignore if downloading any video fails in a playlist.
If passed, the execution will move to the next video
in the passed playlist.
--title-as-name Use the title of the video as the name of the song to
search for metadata. If not passed, user will be asked
if they want to use a different name and continue
accordingly.
Logger:
--level LEVEL The level of the logger that will be used while
verbosing. Use `--list-level` to check available
options.
--disable-file Disable logging to files
--list-level List all the available logger levels.
```
## Configuration
### Setup
The defaults can be changed by editing the config file in the ytmdl folder inside your .config folder.
The config will be created automatically the first time you run `ytmdl` and will be present in ~/.config/ytmdl/config
However, it can be created manually by the following command
```console
mkdir -p ~/.config/ytmdl; curl https://raw.githubusercontent.com/deepjyoti30/ytmdl/master/examples/config > ~/.config/ytmdl/config
```
Above command will download the config from the repo and save it in the `~/.config/ytmdl/` directory.
### Supported Options
As of the latest development branch, the following options can be changed from the config
| Name | Description | Default |
| :------------------: | ------------------------------------------------ | ------------------------------ |
| `SONG_DIR` | Directory to save the songs in after editing | Current directory |
| `SONG_QUALITY` | Quality of the song | 320kbps |
| `METADATA_PROVIDERS` | Which API providers to use for metadata | all supported options are used |
| `DEFAULT_FORMAT` | Default format of the song | mp3 |
| `ON_META_ERROR` | What to do if error occurs while writing meta | exit |
| `ITUNES_COUNTRY` | Which region to use while searching from Itunes | US |
| `SPOTIFY_COUNTRY` | Which market to use while searching from Spotify | US |
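For instance, a config using a few of these options might look like the following (illustrative values only; the field names come from the table above and the sample config linked earlier):
```
SONG_DIR="~/Music"
SONG_QUALITY="320"
DEFAULT_FORMAT="mp3"
ON_META_ERROR="exit"
```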
### Advanced Configuration
#### Dynamically storing songs
The `SONG_DIR` field also takes values that are extracted from the song being downloaded.
The `SONG_DIR` field needs to be passed some special values in order to achieve that. The string is scanned and when a `$` sign occurs, the special string will start and each directory can be separated by using an `->` sign.
To save the song in the `/dir/<album_name>/<artist_name>/<title>/<song_name>.mp3` format, the following needs to be added in the `SONG_DIR` field.
```
SONG_DIR="/dir$Album->Artist->Title"
```
Above will extract to the following directory structure when a song named `Cradles` by artist `Sub Urban` from the album `Cradles - Single`
```
|--dir
|--Cradles - Single
|--Sub Urban
|--Cradles
|--Cradles.mp3
```
In order to pass the name with which the song should be saved, the last attribute can be passed between `[]`.
If the `SONG_DIR` field is `/dir$Album->[Artist]`, it will extract to the following directory structure
```
|--dir
|--Cradles - Single
|--Sub Urban.mp3
```
#### Supported options for dynamic storing
As of the latest source, the following options can be passed to the special string in order to create dynamic directories
| Name | |
| :-----------: | ----------------------- |
| `Artist` | Artist Of the Song |
| `Album` | Album Of the Song |
| `Title` | Title Of the Song |
| `Genre` | Genre Of the Song |
| `TrackNumber` | TrackNumber Of the Song |
| `ReleaseDate` | ReleaseDate Of the Song |
| ytmdl | /ytmdl-2022.12.25.tar.gz/ytmdl-2022.12.25/README.md | README.md |
=====
ytmpc
=====
Youtube MPlayer Controller
ytmpc is a set of tools to manage youtube searches/channels you want to watch and open them in (S)MPlayer
At the moment the tools that are included in this package are: "ytmpc" and "yt_curses"
License
=======
ytmpc is free software and licensed under the GNU GPLv3 or later. You
are welcome to change and redistribute it under certain conditions. For more
information see the COPYING file or visit http://www.gnu.org/licenses/gpl-3.0.html
yt_curses
---------
The main tool now is yt_curses (a curses wrapper around ytmpc)
It's inspired by minitube, but has a quick console curses GUI. It starts the videos from the oldest to the newest in the chosen time-span, so you don't have to rearrange a playlist like in minitube to get multipart videos to run in the right order. Additionally, another point why I wrote it, if not the most important one, is that it uses mplayer instead of gstreamer. On machines like some of mine (AMD Zacate) gstreamer keeps 1 to 1.5 cores busy if you watch in 720p, and browsing and other multitasking tasks slow down badly. With ytmpc/mplayer it only keeps 0.5-0.7 cores busy.
If you like it and want to give me suggestions on what I should make better, or anything else, feel free to write me an email at [email protected]
Just start it with:
> ./yt_curses
It's pretty much self-explanatory: just press the key that you see in front of the function you want:
With the keys [c] and [s] you can switch the stream mode: s for a normal youtube search and c for a channel.
Then press [n] (new) to add a new search/channel. Your most recent searches then get listed and you can start them again with the numbers in front of them.
------------------------------------------------------------------------
ytmpc
-----
The rest of this help file shows how you can use the ytmpc program directly. It's less user-friendly, but has some more features, like downloading your selection or a shuffle mode:
Commands:
---------
command: search / download / stream / playlist
query: 'multiple keywords' / singlekeyword / u_author
order: relevance, viewcount, published or rating
number of results: 1 - 50 | -1 for all (50 is maximum)
timespan (optional): today, week, month, all
mode (optional): shuffle, reverse
Controls:
---------
p = prev
n = next
SPACE = pause
Strg + q = quit
Example usage:
---------------
./ytmpc search 'ukf drum and bass' rating 20 shuffle
Returns 20 videos with query sorted by rating and shuffled
./ytmpc download 'ukf drum and bass' viewcount 10 shuffle
Downloads 10 videos with query sorted by viewcount and shuffled
./ytmpc stream 'ukf drum and bass' relevance 2 month
Stream 2 videos with query sorted by relevance and uploaded between now and a month ago
./ytmpc stream u_PsyStarcraft published 10
Stream the 10 latest videos of the Channel from PsyStarcraft
./ytmpc stream u_hdstarcraft published 10 reverse
Stream the 10 latest videos in reverse order from hdstarcrafts channel
./ytmpc stream u_hdstarcraft published -1 reverse today
Stream all the latest videos FROM TODAY in reverse order from hdstarcrafts channel
./ytmpc playlist 'ukf drum and bass' relevance 2 month
Outputs a playlist in the m3u format to the console
hope that can be used for piping playlists to other players
./ytmpc playlist 'ukf drum and bass' relevance 2 month > /tmp/ytmpc.m3u; totem /tmp/ytmpc.m3u
Creates a temporary playlist and opens it with totem
INSTALL
=======
Either clone the git tree and then with root-privileges call:
> python setup.py install
Or with Pip:
> pip install ytmpc
***
(don't forget to install SMPlayer with apt/yum or whatever tools your distribution gives you,
because SMPlayer is not in pypi so pip cannot install that dependency)
***
Requirements:
=============
SMPlayer
Python 2.7+
python-gdata
Enjoy!
Spiderbit | ytmpc | /ytmpc-0.1.1-dirty.tar.gz/ytmpc-0.1.1-dirty/README | README |
import os, sys, re
from distutils.core import Command
from distutils.command.sdist import sdist as _sdist
from distutils.command.build import build as _build
versionfile_source = None
versionfile_build = None
tag_prefix = None
parentdir_prefix = None
VCS = "git"
IN_LONG_VERSION_PY = False
LONG_VERSION_PY = '''
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
import subprocess
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError, e:
if verbose:
print "unable to run %%s" %% args[0]
print e
return None
stdout = p.communicate()[0].strip()
if p.returncode != 0:
if verbose:
print "unable to run %%s (error)" %% args[0]
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print "variables are unexpanded, not using"
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print "discarding '%%s', no digits" %% ref
refs.discard(ref)
# Assume all version tags have a digit. git's %%d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print "remaining refs:", ",".join(sorted(refs))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print "picking %%s" %% r
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print "no suitable tags, using full revision id"
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print "no .git in", root
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print "tag '%%s' doesn't start with prefix '%%s'" %% (stdout, tag_prefix)
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print "guessing rootdir is '%%s', but '%%s' doesn't start with prefix '%%s'" %% \
(root, dirname, parentdir_prefix)
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = "%(TAG_PREFIX)s"
parentdir_prefix = "%(PARENTDIR_PREFIX)s"
versionfile_source = "%(VERSIONFILE_SOURCE)s"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
'''
import subprocess
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError, e:
if verbose:
print "unable to run %s" % args[0]
print e
return None
stdout = p.communicate()[0].strip()
if p.returncode != 0:
if verbose:
print "unable to run %s (error)" % args[0]
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print "variables are unexpanded, not using"
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print "discarding '%s', no digits" % ref
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print "remaining refs:", ",".join(sorted(refs))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print "picking %s" % r
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print "no suitable tags, using full revision id"
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.abspath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print "no .git in", root
return {}
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print "tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix)
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.abspath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.abspath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print "guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" % \
(root, dirname, parentdir_prefix)
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
import sys
def do_vcs_install(versionfile_source, ipy):
GIT = "git"
if sys.platform == "win32":
GIT = "git.cmd"
run_command([GIT, "add", "versioneer.py"])
run_command([GIT, "add", versionfile_source])
run_command([GIT, "add", ipy])
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
run_command([GIT, "add", ".gitattributes"])
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.7) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
version_version = '%(version)s'
version_full = '%(full)s'
def get_versions(default={}, verbose=False):
return {'version': version_version, 'full': version_full}
"""
DEFAULT = {"version": "unknown", "full": "unknown"}
def versions_from_file(filename):
versions = {}
try:
f = open(filename)
except EnvironmentError:
return versions
for line in f.readlines():
mo = re.match("version_version = '([^']+)'", line)
if mo:
versions["version"] = mo.group(1)
mo = re.match("version_full = '([^']+)'", line)
if mo:
versions["full"] = mo.group(1)
return versions
def write_to_version_file(filename, versions):
f = open(filename, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
print "set %s to '%s'" % (filename, versions["version"])
def get_best_versions(versionfile, tag_prefix, parentdir_prefix,
default=DEFAULT, verbose=False):
# returns dict with two keys: 'version' and 'full'
#
# extract version from first of _version.py, 'git describe', parentdir.
# This is meant to work for developers using a source checkout, for users
# of a tarball created by 'setup.py sdist', and for users of a
# tarball/zipball created by 'git archive' or github's download-from-tag
# feature.
variables = get_expanded_variables(versionfile_source)
if variables:
ver = versions_from_expanded_variables(variables, tag_prefix)
if ver:
if verbose: print "got version from expanded variable", ver
return ver
ver = versions_from_file(versionfile)
if ver:
if verbose: print "got version from file %s" % versionfile, ver
return ver
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if ver:
if verbose: print "got version from git", ver
return ver
ver = versions_from_parentdir(parentdir_prefix, versionfile_source, verbose)
if ver:
if verbose: print "got version from parentdir", ver
return ver
if verbose: print "got version from default", ver
return default
def get_versions(default=DEFAULT, verbose=False):
assert versionfile_source is not None, "please set versioneer.versionfile_source"
assert tag_prefix is not None, "please set versioneer.tag_prefix"
assert parentdir_prefix is not None, "please set versioneer.parentdir_prefix"
return get_best_versions(versionfile_source, tag_prefix, parentdir_prefix,
default=default, verbose=verbose)
def get_version(verbose=False):
return get_versions(verbose=verbose)["version"]
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ver = get_version(verbose=True)
print "Version is currently:", ver
class cmd_build(_build):
def run(self):
versions = get_versions(verbose=True)
_build.run(self)
# now locate _version.py in the new build/ directory and replace it
# with an updated value
target_versionfile = os.path.join(self.build_lib, versionfile_build)
print "UPDATING", target_versionfile
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % versions)
f.close()
class cmd_sdist(_sdist):
def run(self):
versions = get_versions(verbose=True)
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory (remembering
# that it may be a hardlink) and replace it with an updated value
target_versionfile = os.path.join(base_dir, versionfile_source)
print "UPDATING", target_versionfile
os.unlink(target_versionfile)
f = open(target_versionfile, "w")
f.write(SHORT_VERSION_PY % self._versioneer_generated_versions)
f.close()
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
class cmd_update_files(Command):
description = "modify __init__.py and create _version.py"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
ipy = os.path.join(os.path.dirname(versionfile_source), "__init__.py")
print " creating %s" % versionfile_source
f = open(versionfile_source, "w")
f.write(LONG_VERSION_PY % {"DOLLAR": "$",
"TAG_PREFIX": tag_prefix,
"PARENTDIR_PREFIX": parentdir_prefix,
"VERSIONFILE_SOURCE": versionfile_source,
})
f.close()
try:
old = open(ipy, "r").read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print " appending to %s" % ipy
f = open(ipy, "a")
f.write(INIT_PY_SNIPPET)
f.close()
else:
print " %s unmodified" % ipy
do_vcs_install(versionfile_source, ipy)
def get_cmdclass():
return {'version': cmd_version,
'update_files': cmd_update_files,
'build': cmd_build,
'sdist': cmd_sdist,
} | ytmpc | /ytmpc-0.1.1-dirty.tar.gz/ytmpc-0.1.1-dirty/versioneer.py | versioneer.py |
import sys
import random
# from youtube_player import YoutubePlayer
# my player, uses urwid and vlc ^^
import gdata.youtube.service
# http://gdata-python-client.googlecode.com/hg/pydocs/gdata.youtube.service.html#YouTubeVideoQuery
# https://developers.google.com/youtube/1.0/developers_guide_python
#from ytdl import ytdl
#from ytstr import ytstr
# https://bitbucket.org/rg3/youtube-dl/wiki/Home
class YoutubeClient:
def __init__(self,keyword,q,order,num_results,mode=None,time=None):
keywords = ['search', 'download', 'stream', 'playlist']
if keyword in keywords: self.keyword = keyword
else: sys.exit('invalid keyword')
self.q = q
ordering_key = {'rating':'rating', 'viewcount':'viewCount', \
'relevance':'relevance','published':'published'}
ordering = ['rating','viewcount','relevance', 'published']
if order in ordering: self.order = ordering_key[order]
else: sys.exit('invalid ordering')
try: self.num = int(num_results)
except Exception as e: sys.exit('invalid number %s' %e)
if mode is not None:
self.mode = mode
else: self.mode = False
times_key = {'today':'today','week':'this_week',\
'month':'this_month','time':'all_time'}
times = ['today', 'week', 'month', 'time']
if time is not None and time in times:
self.time = times_key[time]
else:
self.time = False
self.client = gdata.youtube.service.YouTubeService()
self.pre_execute()
def get_playlist(self, feed, pl_format='file'):
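        """Build an extended M3U playlist from the feed entries.

        Returns the raw playlist lines when pl_format is 'file', or just the
        list of video URLs when pl_format is 'urllist'.
        """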
urls = []
lines = []
lines.append('#EXTM3U\n')
lines.append("# Playlist created by youtube_mplayer_controller\n")
for entry in feed.entry:
url = str(entry.media.player.url)
urls.append(url)
lines.append('#EXTINF:0,%s\n%s\n'%(entry.title.text,url))
lines.append(urls[0])
if pl_format == 'file':
return lines
elif pl_format == 'urllist':
return urls
def playlist(self,feed):
lines = self.get_playlist(feed)
for line in lines:
print line
def search(self,feed):
for entry in feed.entry:
try:
print '\n[video] title: %s' % entry.title.text
print '[video] url: %s' % entry.media.player.url
print '[video] rating: %s' % entry.rating.average
print '[video] view count: %s' % entry.statistics.view_count
print '[video] id: %s' % entry.media.player.url.split(\
'watch?v=').pop().split("&")[0]
except Exception as e:
print('search failed:\nError: %s' % e)
def download(self,feed):
for entry in feed.entry:
try:
ytdl.main(entry.media.player.url)
except Exception as e:
print('download failed:\nError: %s' % e)
def gen_temp_file(self):
feed = self.feed
import tempfile
d = tempfile.mkdtemp()
self.temp_dir = d
import os
f = open(os.path.join(d,'test.m3u'), 'w+')
lines = self.get_playlist(feed)
self.temp_file = f
f.writelines(lines)
f.close()
def remove_temp_file(self):
import shutil
shutil.rmtree(self.temp_dir)
def stream(self, feed):
self.gen_temp_file()
import subprocess
#subprocess.Popen(["smplayer"])
#subprocess.Popen(["umplayer"])
subprocess.check_call(["smplayer", self.temp_file.name])
self.remove_temp_file()
def pre_execute(self):
query = gdata.youtube.service.YouTubeVideoQuery()
if self.q.startswith('u_'):
query.author = self.q[2:]
else:
query.vq = self.q
query.format = '5'
query.hd = True
if self.num != -1:
query.max_results = self.num
query.start_index = 1
query.racy = 'exclude'
query.orderby = self.order
if self.time: query.time = self.time
feed = self.client.YouTubeQuery(query)
if self.mode == 'shuffle': random.shuffle(feed.entry)
elif self.mode == 'reverse': feed.entry.reverse()
self.feed = feed
def execute(self):
feed = self.feed
command = self.keyword
if command == 'download': self.download(feed)
if command == 'stream':
if len(self.feed.entry) == 0:
print "search had 0 results, widen your search terms"
return 0
else:
self.stream(feed)
if command == 'search': self.search(feed)
if command == 'playlist': self.playlist(feed) | ytmpc | /ytmpc-0.1.1-dirty.tar.gz/ytmpc-0.1.1-dirty/ytplay/ytplayer.py | ytplayer.py |
import os
import re
import sys
from subprocess import CalledProcessError
from subprocess import check_output
from subprocess import STDOUT
# These will be filled in if git archive is run or by setup.py cmdclasses
GIT_REFS = 'tag: 1.5.6'
GIT_SHA1 = 'ebbdc96'
# Git describe gives us sha1, last version-like tag, and commits since then
CMD = "git describe --tags --dirty --always --long --match=[0-9]*[-.][0-9]*"
def get_version_from_git(path=None):
"""Try to parse version from git describe, fallback to git archive tags"""
tag, plus, suffix = "0.0", "untagged", ""
if not GIT_SHA1.startswith("$"):
# git archive or the cmdclasses below have filled in these strings
sha1 = GIT_SHA1
for ref_name in GIT_REFS.split(", "):
if ref_name.startswith("tag: "):
# git from 1.8.3 onwards labels archive tags "tag: TAGNAME"
tag, plus = ref_name[5:], "0"
else:
if path is None:
# If no path to git repo, choose the directory this file is in
path = os.path.dirname(os.path.abspath(__file__))
# output is TAG-NUM-gHEX[-dirty] or HEX[-dirty]
try:
cmd_out = check_output(CMD.split(), stderr=STDOUT, cwd=path)
except Exception as e:
sys.stderr.write(f"{type(e).__name__}: {str(e)}\n")
if isinstance(e, CalledProcessError):
sys.stderr.write("-> %s" % e.output.decode())
return "0.0+unknown", None, e
else:
out = cmd_out.decode().strip()
if out.endswith("-dirty"):
out = out[:-6]
suffix = ".dirty"
if "-" in out:
# There is a tag, extract it and the other pieces
match = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", out)
tag, plus, sha1 = match.groups()
else:
# No tag, just sha1
sha1 = out
# Replace dashes in tag for dots
tag = tag.replace("-", ".")
if plus != "0" or suffix:
# Not on a tag, add additional info
tag = "%(tag)s+%(plus)s.g%(sha1)s%(suffix)s" % locals()
return tag, sha1, None
__version__, git_sha1, git_error = get_version_from_git()
def get_cmdclass(build_py=None, sdist=None):
"""Create cmdclass dict to pass to setuptools.setup that will write a
_version_static.py file in our resultant sdist, wheel or egg"""
if build_py is None:
from setuptools.command.build_py import build_py
if sdist is None:
from setuptools.command.sdist import sdist
def make_version_static(base_dir, pkg):
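        """Rewrite <pkg>/_version_git.py inside base_dir so that GIT_SHA1 and
        GIT_REFS hold the values computed at build time instead of being
        looked up from git again at runtime."""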
vg = os.path.join(base_dir, pkg.split(".")[0], "_version_git.py")
if os.path.isfile(vg):
lines = open(vg).readlines()
with open(vg, "w") as f:
for line in lines:
# Replace GIT_* with static versions
if line.startswith("GIT_SHA1 = "):
f.write("GIT_SHA1 = '%s'\n" % git_sha1)
elif line.startswith("GIT_REFS = "):
f.write("GIT_REFS = 'tag: %s'\n" % __version__)
else:
f.write(line)
class BuildPy(build_py):
def run(self):
build_py.run(self)
for pkg in self.packages:
make_version_static(self.build_lib, pkg)
class Sdist(sdist):
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
for pkg in self.distribution.packages:
make_version_static(base_dir, pkg)
return dict(build_py=BuildPy, sdist=Sdist) | ytmusic-deleter | /ytmusic_deleter-1.5.6-py3-none-any.whl/ytmusic_deleter/_version_git.py | _version_git.py |
import logging
import os
import re
import sys
from pathlib import Path
from random import shuffle as unsort
from time import strftime
import click
import enlighten
from ytmusic_deleter import constants as const
from ytmusicapi import YTMusic
manager = enlighten.get_manager()
progress_bar = None
def ensure_auth(credential_dir):
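    """Create the global YTMusic client from the headers file in credential_dir,
    running the interactive ytmusicapi setup first if the file is missing or invalid."""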
global youtube_auth
headers_file_path = Path(credential_dir) / const.HEADERS_FILE
try:
        logging.info(f'Looking for "{headers_file_path}"')
        youtube_auth = YTMusic(headers_file_path)
        logging.info(f'Found "{headers_file_path}"')
except (KeyError, AttributeError):
logging.info(f"Creating {const.HEADERS_FILE} file...")
youtube_auth = YTMusic(YTMusic.setup(filepath=headers_file_path))
        logging.info(f'Created "{headers_file_path}"')
@click.group()
@click.version_option()
@click.option(
"--log-dir",
"-l",
default=os.getcwd(),
help="Custom directory in which to write log files, instead of current working directory.",
)
@click.option(
"--credential-dir",
"-c",
default=os.getcwd(),
help="Custom directory in which to locate/create JSON credential file, instead of current working directory",
)
@click.option(
"--static-progress",
"-p",
is_flag=True,
help="Log the progress statically instead of an animated progress bar",
)
@click.pass_context
def cli(ctx, log_dir, credential_dir, static_progress):
"""Perform batch delete operations on your YouTube Music library."""
logging.basicConfig(
force=True,
level=logging.INFO,
format="[%(asctime)s] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
handlers=[
logging.FileHandler(
Path(log_dir) / f"ytmusic-deleter_{strftime('%Y-%m-%d')}.log"
),
logging.StreamHandler(sys.stdout),
],
)
ensure_auth(credential_dir)
ctx.ensure_object(dict)
ctx.obj["STATIC_PROGRESS"] = static_progress
@cli.command()
@click.option(
"--add-to-library",
"-a",
is_flag=True,
help="Add corresponding albums to your library before deleting them from uploads.",
)
@click.pass_context
def delete_uploads(ctx, add_to_library):
"""Delete all tracks that you have uploaded to your YT Music library."""
(albums_deleted, albums_total) = delete_uploaded_albums(ctx, add_to_library)
logging.info(f"Deleted {albums_deleted} out of {albums_total} uploaded albums.")
if (add_to_library) and albums_total - albums_deleted > 0:
logging.info(
f"\tRemaining {albums_total - albums_deleted} albums did not have a match in YouTube Music's online catalog."
)
(singles_deleted, singles_total) = delete_uploaded_singles(ctx)
logging.info(f"Deleted {singles_deleted} out of {singles_total} uploaded singles.")
def delete_uploaded_albums(ctx, add_to_library):
logging.info("Retrieving all uploaded albums...")
albums_deleted = 0
uploaded_albums = youtube_auth.get_library_upload_albums(sys.maxsize)
if not uploaded_albums:
return (albums_deleted, 0)
logging.info(f"Retrieved {len(uploaded_albums)} uploaded albums from your library.")
global progress_bar
progress_bar = manager.counter(
total=len(uploaded_albums),
desc="Albums Processed",
unit="albums",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
for album in uploaded_albums:
try:
artist = (
album["artists"][0]["name"]
if album.get("artists") # Using `get` ensures key exists and isn't []
else const.UNKNOWN_ARTIST
)
title = album["title"]
logging.info(f"Processing album: {artist} - {title}")
if add_to_library:
if artist == const.UNKNOWN_ARTIST:
                logging.warning(
"\tAlbum is missing artist metadata. Skipping match search and will not delete."
)
update_progress(ctx)
continue
elif not add_album_to_library(artist, title):
                logging.warning(
"\tNo match for uploaded album found in online catalog. Will not delete."
)
update_progress(ctx)
continue
response = youtube_auth.delete_upload_entity(album["browseId"])
if response == "STATUS_SUCCEEDED":
logging.info("\tDeleted album from uploads.")
albums_deleted += 1
else:
logging.error("\tFailed to delete album from uploads")
except (AttributeError, TypeError, KeyError) as e:
logging.error(f"\tEncountered exception processing album attribute: {e}")
update_progress(ctx)
return (albums_deleted, len(uploaded_albums))
def delete_uploaded_singles(ctx):
logging.info("Retrieving all uploaded singles...")
singles_deleted = 0
uploaded_singles = youtube_auth.get_library_upload_songs(sys.maxsize)
if not uploaded_singles:
return (singles_deleted, 0)
# Filter for songs that don't have an album, otherwise songs that
# were skipped in the first batch would get deleted here
uploaded_singles = [single for single in uploaded_singles if not single["album"]]
logging.info(
f"Retrieved {len(uploaded_singles)} uploaded singles from your library."
)
global progress_bar
progress_bar = manager.counter(
total=len(uploaded_singles),
desc="Singles Processed",
unit="singles",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
for single in uploaded_singles:
try:
artist = (
single["artist"][0]["name"]
if single.get("artist") # Using `get` ensures key exists and isn't []
else const.UNKNOWN_ARTIST
)
title = single["title"]
response = youtube_auth.delete_upload_entity(single["entityId"])
if response == "STATUS_SUCCEEDED":
logging.info(f"\tDeleted {artist} - {title}")
singles_deleted += 1
else:
logging.error(f"\tFailed to delete {artist} - {title}")
except (AttributeError, TypeError) as e:
logging.error(e)
update_progress(ctx)
return (singles_deleted, len(uploaded_singles))
def add_album_to_library(artist, title):
logging.info("\tSearching for album in online catalog...")
search_results = youtube_auth.search(f"{artist} {title}")
for result in search_results:
# Find the first album for which the artist and album title are substrings
if result["resultType"] == "album" and match_found(result, artist, title):
catalog_album = youtube_auth.get_album(result["browseId"])
logging.info(
f"\tFound matching album \"{catalog_album['artist'][0]['name'] if 'artist' in catalog_album else ''}"
f" - {catalog_album['title']}\" in YouTube Music. Adding to library..."
)
success = youtube_auth.rate_playlist(
catalog_album["audioPlaylistId"], const.LIKE
)
if success:
logging.info("\tAdded album to library.")
else:
logging.error("\tFailed to add album to library")
return True
return False
def match_found(result, artist, title):
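    """Return True if artist and title are case-insensitive substrings of the search
    result, retrying once with parentheticals, brackets and quotes stripped out."""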
try:
resultArtist = str(result["artist"]).lower()
except KeyError:
resultArtist = str(result["artists"][0] if "artists" in result else "").lower()
try:
resultTitle = str(result["title"]).lower()
except KeyError:
resultTitle = ""
artist = artist.lower()
title = title.lower()
if artist in resultArtist and title in resultTitle:
return True
else:
# Try again but strip out parentheticals and quotes
resultTitle = re.sub(r"\(.*?\)|\[.*?\]|\"|\'", "", resultTitle).strip()
title = re.sub(r"\(.*?\)|\[.*?\]|\"|\'", "", title).strip()
return artist in resultArtist and title in resultTitle
@cli.command()
@click.pass_context
def remove_library(ctx):
"""Remove all tracks that you have added to your library from within YouTube Music."""
logging.info("Retrieving all library albums...")
try:
library_albums = youtube_auth.get_library_albums(sys.maxsize)
logging.info(f"Retrieved {len(library_albums)} albums from your library.")
except Exception:
logging.exception("Failed to get library albums.")
library_albums = []
global progress_bar
progress_bar = manager.counter(
total=len(library_albums),
desc="Albums Processed",
unit="albums",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
albums_removed = remove_library_albums(ctx, library_albums)
logging.info("Retrieving all singles...")
# Aside from actual singles, these might also be individual songs from an album that were added to your library
try:
library_songs = youtube_auth.get_library_songs(sys.maxsize)
logging.info(f"Retrieved {len(library_songs)} singles from your library.")
except Exception:
logging.exception("Failed to get library singles.")
library_songs = []
# Filter out songs where album is None (rare but seen here: https://github.com/apastel/ytmusic-deleter/issues/12)
filtered_songs = list(filter(lambda song: song["album"], library_songs))
if len(library_songs) - len(filtered_songs) > 0:
logging.info(
f"{len(library_songs) - len(filtered_songs)} songs are not part of an album and won't be deleted."
)
# Filter for unique album IDs so that for each song, we can just remove the album it's a part of
album_unique_songs = list({v["album"]["id"]: v for v in filtered_songs}.values())
progress_bar = manager.counter(
total=len(album_unique_songs),
desc="Singles Processed",
unit="singles",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
albums_removed += remove_library_albums_by_song(ctx, album_unique_songs)
logging.info(
f"Removed {albums_removed} out of {len(library_albums) + len(album_unique_songs)} albums from your library."
)
def remove_library_albums(ctx, albums):
albums_removed = 0
for album in albums:
if remove_album(album["browseId"]):
albums_removed += 1
update_progress(ctx)
return albums_removed
def remove_library_albums_by_song(ctx, songs):
albums_removed = 0
for song in songs:
if remove_album(song["album"]["id"]):
albums_removed += 1
update_progress(ctx)
return albums_removed
def remove_album(browseId):
try:
album = youtube_auth.get_album(browseId)
except Exception:
logging.exception(
f"\tFailed to remove album with ID {browseId} from your library, as it could not be retrieved."
)
return False
artist = album["artists"][0]["name"] if "artists" in album else const.UNKNOWN_ARTIST
title = album["title"]
logging.info(f"Processing album: {artist} - {title}")
response = youtube_auth.rate_playlist(album["audioPlaylistId"], const.INDIFFERENT)
if response:
logging.info(f"\tRemoved {artist} - {title} from your library.")
return True
else:
logging.error(f"\tFailed to remove {artist} - {title} from your library.")
return False
@cli.command()
@click.pass_context
def unlike_all(ctx):
"""Reset all Thumbs Up ratings back to neutral"""
logging.info("Retrieving all your liked songs...")
try:
your_likes = youtube_auth.get_liked_songs(sys.maxsize)
except Exception:
logging.error("\tNo liked songs found or error retrieving liked songs.")
return False
logging.info(f"\tRetrieved {len(your_likes['tracks'])} liked songs.")
logging.info("Begin unliking songs...")
global progress_bar
progress_bar = manager.counter(
total=len(your_likes["tracks"]),
desc="Songs Unliked",
unit="songs",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
for track in your_likes["tracks"]:
artist = (
track["artists"][0]["name"] if "artists" in track else const.UNKNOWN_ARTIST
)
title = track["title"]
logging.info(f"Processing track: {artist} - {title}")
if track["album"] is None:
logging.info(
"\tSkipping deletion as this might be a YouTube video and not a YouTube Music song."
)
else:
logging.info("\tRemoved track from Likes.")
youtube_auth.rate_song(track["videoId"], const.INDIFFERENT)
update_progress(ctx)
logging.info("Finished unliking all songs.")
@cli.command()
@click.pass_context
def delete_playlists(ctx):
"""Delete all playlists"""
logging.info("Retrieving all your playlists...")
library_playlists = youtube_auth.get_library_playlists(sys.maxsize)
# Can't delete "Your Likes" playlist
library_playlists = list(
filter(lambda playlist: playlist["playlistId"] != "LM", library_playlists)
)
logging.info(f"\tRetrieved {len(library_playlists)} playlists.")
logging.info("Begin deleting playlists...")
global progress_bar
progress_bar = manager.counter(
total=len(library_playlists),
desc="Playlists Deleted",
unit="playlists",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
for playlist in library_playlists:
logging.info(f"Processing playlist: {playlist['title']}")
try:
response = youtube_auth.delete_playlist(playlist["playlistId"])
if response:
logging.info(
f"\tRemoved playlist \"{playlist['title']}\" from your library."
)
else:
logging.error(
f"\tFailed to remove playlist \"{playlist['title']}\" from your library."
)
except Exception:
logging.error(
f"\tCould not delete playlist {playlist['title']}. It might be a YT Music curated playlist."
)
update_progress(ctx)
logging.info("Finished deleting all playlists")
@cli.command()
@click.pass_context
def delete_all(ctx):
"""Executes delete-uploads, remove-library, unlike-all, and delete_playlists"""
ctx.invoke(delete_uploads)
ctx.invoke(remove_library)
ctx.invoke(unlike_all)
ctx.invoke(delete_playlists)
@cli.command()
@click.argument("playlist_titles", nargs=-1, required=True)
@click.option(
"--shuffle", "-s", is_flag=True, help="Shuffle the playlist(s) instead of sorting."
)
@click.pass_context
def sort_playlist(ctx, shuffle, playlist_titles):
"""Sort or shuffle one or more playlists alphabetically by artist and by album"""
all_playlists = youtube_auth.get_library_playlists(sys.maxsize)
lowercase_playlist_titles = [title.lower() for title in playlist_titles]
selected_playlist_list = [
playlist
for playlist in all_playlists
if playlist["title"].lower() in lowercase_playlist_titles
]
for selected_playlist in selected_playlist_list:
logging.info(f'Processing playlist: {selected_playlist["title"]}')
playlist = youtube_auth.get_playlist(
selected_playlist["playlistId"], sys.maxsize
)
current_tracklist = [t for t in playlist["tracks"]]
if shuffle:
logging.info(f"\tPlaylist: {selected_playlist['title']} will be shuffled")
desired_tracklist = [t for t in playlist["tracks"]]
unsort(desired_tracklist)
else:
desired_tracklist = [
t for t in sorted(playlist["tracks"], key=lambda t: make_sort_key(t))
]
global progress_bar
progress_bar = manager.counter(
total=len(desired_tracklist),
desc=f'\'{selected_playlist["title"]}\' Tracks {"Shuffled" if shuffle else "Sorted"}',
unit="tracks",
enabled=not ctx.obj["STATIC_PROGRESS"],
)
        for cur_idx, cur_track in enumerate(desired_tracklist):
track_after = current_tracklist[cur_idx]
logging.debug( # No way to actually enable debug logging yet
f"Moving {cur_track['artists'][0]['name']} - {cur_track['title']} "
f"before {track_after['artists'][0]['name']} - {track_after['title']}"
)
if cur_track != track_after:
try:
response = youtube_auth.edit_playlist(
playlist["id"],
moveItem=(
cur_track["setVideoId"],
track_after["setVideoId"],
),
)
if not response:
logging.error(
f"Failed to move {cur_track['artists'][0]['name']} - {cur_track['title']} "
f"before {track_after['artists'][0]['name']} - {track_after['title']}"
)
except Exception:
logging.error(
f"Failed to move {cur_track['artists'][0]['name']} - {cur_track['title']} "
f"before {track_after['artists'][0]['name']} - {track_after['title']}"
)
current_tracklist.remove(cur_track)
current_tracklist.insert(cur_idx, cur_track)
update_progress(ctx)
not_found_playlists = []
for title in lowercase_playlist_titles:
if title not in [x["title"].lower() for x in selected_playlist_list]:
not_found_playlists.append(title)
if not_found_playlists:
raise click.BadParameter(
f'No playlists found named "{", ".join(not_found_playlists)}". Double-check your playlist name(s) '
'(or surround them "with quotes") and try again.'
)
def make_sort_key(track):
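    """Build an (artist, album, title) sort key, stripping a leading "the "/"a " from
    the artist name and falling back to "z" when the artist or album is missing."""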
try:
artists = track["artists"]
artist = artists[0]["name"].lower() if artists else "z"
album = track["album"]
album_title = album["name"] if album else "z"
return (re.sub(r"^(the |a )", "", artist), album_title, track["title"])
except Exception:
logging.exception(f"Track {track} could not be sorted.")
raise
def update_progress(ctx):
global progress_bar
progress_bar.update()
if ctx.obj["STATIC_PROGRESS"]:
logging.info(
f"Total complete: {round(progress_bar.count / progress_bar.total * 100)}%"
)
if __name__ == "__main__":
cli() | ytmusic-deleter | /ytmusic_deleter-1.5.6-py3-none-any.whl/ytmusic_deleter/cli.py | cli.py |
ytmusicapi: Unofficial API for YouTube Music
############################################
.. image:: https://img.shields.io/pypi/dm/ytmusicapi?style=flat-square
:alt: PyPI Downloads
:target: https://pypi.org/project/ytmusicapi/
.. image:: https://badges.gitter.im/sigma67/ytmusicapi.svg
:alt: Ask questions at https://gitter.im/sigma67/ytmusicapi
:target: https://gitter.im/sigma67/ytmusicapi
.. image:: https://img.shields.io/codecov/c/github/sigma67/ytmusicapi?style=flat-square
:alt: Code coverage
:target: https://codecov.io/gh/sigma67/ytmusicapi
.. image:: https://img.shields.io/github/v/release/sigma67/ytmusicapi?style=flat-square
:alt: Latest release
:target: https://github.com/sigma67/ytmusicapi/releases/latest
.. image:: https://img.shields.io/github/commits-since/sigma67/ytmusicapi/latest?style=flat-square
:alt: Commits since latest release
:target: https://github.com/sigma67/ytmusicapi/commits
ytmusicapi is a Python 3 library to send requests to the YouTube Music API.
It emulates YouTube Music web client requests using the user's cookie data for authentication.
.. features
Features
--------
| **Browsing**:
* search (including all filters) and suggestions
* get artist information and releases (songs, videos, albums, singles, related artists)
* get user information (videos, playlists)
* get albums
* get song metadata
* get watch playlists (next songs when you press play/radio/shuffle in YouTube Music)
* get song lyrics
| **Exploring music**:
* get moods and genres playlists
* get latest charts (globally and per country)
| **Library management**:
* get library contents: playlists, songs, artists, albums and subscriptions
* add/remove library content: rate songs, albums and playlists, subscribe/unsubscribe artists
* get and modify play history
| **Playlists**:
* create and delete playlists
* modify playlists: edit metadata, add/move/remove tracks
* get playlist contents
* get playlist suggestions
| **Uploads**:
* upload songs and remove them again
* list uploaded songs, artists and albums
| **Localization**:
* all regions are supported (see `locations FAQ <https://ytmusicapi.readthedocs.io/en/stable/faq.html#which-values-can-i-use-for-locations>`__)
* 16 languages are supported (see `languages FAQ <https://ytmusicapi.readthedocs.io/en/stable/faq.html#which-values-can-i-use-for-languages>`__)
If you find something missing or broken,
check the `FAQ <https://ytmusicapi.readthedocs.io/en/stable/faq.html>`__ or
feel free to create an `issue <https://github.com/sigma67/ytmusicapi/issues/new/choose>`__.
Usage
------
.. code-block:: python
from ytmusicapi import YTMusic
yt = YTMusic('oauth.json')
playlistId = yt.create_playlist('test', 'test description')
search_results = yt.search('Oasis Wonderwall')
yt.add_playlist_items(playlistId, [search_results[0]['videoId']])
The `tests <https://github.com/sigma67/ytmusicapi/blob/master/tests/test.py>`_ are also a great source of usage examples.
.. end-features
Requirements
==============
- Python 3.8 or higher - https://www.python.org
Setup and Usage
===============
See the `Documentation <https://ytmusicapi.readthedocs.io/en/latest/usage.html>`_ for detailed instructions
Contributing
==============
Pull requests are welcome. There are still some features that are not yet implemented.
Please, refer to `CONTRIBUTING.rst <https://github.com/sigma67/ytmusicapi/blob/master/CONTRIBUTING.rst>`_ for guidance.
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/README.rst | README.rst |
Contributing to ytmusicapi
##########################
Issues
-------
Please make sure to include sufficient details for reproducing your issue.
This includes the version of the library used as well as detailed instructions for reproduction.
If needed, please include the YouTube Music API response as well by debugging the API (responses
may differ based on the user account, so this helps with reproducing new issues).
Pull requests
--------------
Please open an issue before submitting, unless it's just a typo or some other small error.
Before making changes to the code, install the development requirements using
.. code-block::
pip install -e .[dev]
Before committing, stage your files and run style and linter checks:
.. code-block::
git add .
pre-commit run
pre-commit will unstage any files that do not pass. Fix the issues until all checks pass and commit.
Code structure
---------------
The folder ``ytmusicapi`` contains the main library which is distributed to the users.
Each main library function in ``ytmusic.py`` is covered by a test in the ``tests`` folder.
If you want to contribute a new function, please create a corresponding unittest. | ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/CONTRIBUTING.rst | CONTRIBUTING.rst |
FAQ
=====
Frequently asked questions for ``ytmusicapi``. Contributions are welcome, please
`submit a PR <https://github.com/sigma67/ytmusicapi/pulls>`_.
Setup
------------
My library results are empty even though I set up my cookie correctly.
***********************************************************************
Please make sure that you don't have multiple Google accounts. ``ytmusicapi`` might be returning
results from a different account which is currently empty. You can set your account using ``X-Goog-AuthUser``
in your headers file (numeric index) or by providing the id of a brand account with ``ytmusic = YTMusic(headers, "1234..")``.
For more details see the :doc:`reference`.
Usage
-----------------------
How do I add a song, album, artist or playlist to my library?
***********************************************************************
- **songs**: `edit_song_library_status <Reference.html#ytmusicapi.YTMusic.edit_song_library_status>`__ .
Liking a song using `rate_song <Reference.html#ytmusicapi.YTMusic.rate_song>`__
does *not* add it to your library, only to your liked songs playlist.
- **albums, playlists**: `rate_playlist <Reference.html#ytmusicapi.YTMusic.rate_playlist>`__
- **artists**: `subscribe_artists <Reference.html#ytmusicapi.YTMusic.subscribe_artists>`__ .
This will add the artist to your Subscriptions tab. The Artists tab is determined by the songs/albums you have
added to your library.
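A minimal sketch of the calls above (all IDs are placeholders; a song's
``feedbackTokens`` come from an authenticated response such as ``get_album``):

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic("oauth.json")

    # album or playlist: rating it "LIKE" adds it to your library
    yt.rate_playlist("OLAK5uy_examplePlaylistId", "LIKE")

    # artist: subscribing adds it to your Subscriptions tab
    yt.subscribe_artists(["UCexampleChannelId"])

    # song: pass the track's "add" feedback token
    album = yt.get_album("MPREb_exampleBrowseId")
    yt.edit_song_library_status([album["tracks"][0]["feedbackTokens"]["add"]])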
How can I get the radio playlist for a song, video, playlist or album?
***********************************************************************
- **songs, videos**: ``RDAMVM`` + ``videoId``
- **playlists, albums**: ``RDAMPL`` + ``playlistId``
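For example, combining the prefix with ``get_watch_playlist`` (a minimal sketch;
``dQw4w9WgXcQ`` is only a placeholder ``videoId``):

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic()
    videoId = "dQw4w9WgXcQ"
    radio = yt.get_watch_playlist(playlistId="RDAMVM" + videoId)
    print([track["title"] for track in radio["tracks"]])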
How can I get the shuffle playlist for a playlist or album?
***********************************************************************
Use `get_watch_playlist_shuffle <Reference.html#ytmusicapi.YTMusic.get_watch_playlist_shuffle>`__
with the ``playlistId`` or ``audioPlaylistId`` (albums).
How can I get all my public playlists in a single request?
***********************************************************************
Call `get_user_playlists <Reference.html#ytmusicapi.YTMusic.get_user_playlists>`__
with your own ``channelId``.
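A minimal sketch, assuming ``get_user`` returns a ``params`` continuation token
for your playlists (the ``channelId`` below is a placeholder):

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic("oauth.json")
    channel_id = "UCyourOwnChannelIdHere"  # placeholder: use your own channelId
    user = yt.get_user(channel_id)
    all_playlists = yt.get_user_playlists(channel_id, user["playlists"]["params"])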
Can I download songs?
***********************************************************************
You can use `youtube-dl <https://github.com/ytdl-org/youtube-dl/>`_ for this purpose.
How do I package ytmusicapi with ``pyinstaller``?
*************************************************
To package ytmusicapi correctly, you need to add the locales files to your executable.
You can use ``--add-data path-to-ytmusicapi/locales`` or ``--collect-all ytmusicapi`` to accomplish this.
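The same can also be done from a small Python build script via PyInstaller's
programmatic entry point (a sketch; ``myscript.py`` stands in for your own script):

.. code-block:: python

    import PyInstaller.__main__

    # equivalent to: pyinstaller --collect-all ytmusicapi myscript.py
    PyInstaller.__main__.run(["myscript.py", "--collect-all", "ytmusicapi"])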
YouTube Music API Internals
------------------------------
Is there a difference between songs and videos?
***********************************************************************
Yes. Videos are regular videos from YouTube, which can be uploaded by any user. Songs are actual songs uploaded by artists.
You can also add songs to your library, while you can't add videos.
Is there a rate limit?
***********************************************************************
There most certainly is, although you shouldn't run into it during normal usage.
See related issues:
- `Creating playlists <https://github.com/sigma67/ytmusicapi/issues/19>`_
- `Uploads <https://github.com/sigma67/ytmusicapi/issues/6>`_
What is a browseId?
***********************************************************************
A ``browseId`` is an internal, globally unique identifier used by YouTube Music for browsable content.
Which videoTypes exist and what do they mean?
***********************************************************************
``videoType`` is prefixed with ``MUSIC_VIDEO_TYPE_``, i.e. ``MUSIC_VIDEO_TYPE_OMV``.
Currently the following variants of ``videoType`` are known:
- ``OMV``: Original Music Video - uploaded by original artist with actual video content
- ``UGC``: User Generated Content - uploaded by regular YouTube user
- ``ATV``: High quality song uploaded by original artist with cover image
- ``OFFICIAL_SOURCE_MUSIC``: Official video content, but not for a single track
Why is ytmusicapi returning more results than requested with the limit parameter?
*********************************************************************************
YouTube Music always returns increments of a specific pagination value, usually between 20 and 100 items at a time.
This is the case if a ytmusicapi method supports the ``limit`` parameter. The default value of the ``limit`` parameter
indicates the server-side pagination increment. ytmusicapi will keep fetching continuations from the server until it has
reached at least the ``limit`` parameter, and return all of these results.
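Illustrative sketch (the exact pagination increment varies per endpoint, so the
returned count can exceed the requested ``limit``):

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic("oauth.json")
    songs = yt.get_library_songs(limit=30)
    # may print e.g. 50 if this endpoint paginates in steps of 25
    print(len(songs))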
Which values can I use for languages?
*************************************
The ``language`` parameter determines the language of the returned results.
``ytmusicapi`` only supports a subset of the languages supported by YouTube Music, as translations need to be done manually.
Contributions are welcome, see `here for instructions <https://github.com/sigma67/ytmusicapi/tree/master/ytmusicapi/locales>`__.
For the list of values you can use for the ``language`` parameter, see below:
.. raw:: html
<details>
<summary><a>Supported locations</a></summary>
.. container::
.. list-table::
* - Language
- Value
* - Arabic
- ar
* - German
- de
* - English (default)
- en
* - Spanish
- es
* - French
- fr
* - Hindi
- hi
* - Italian
- it
* - Japanese
- ja
* - Korean
- ko
* - Dutch
- nl
* - Portuguese
- pt
* - Russian
- ru
* - Turkish
- tr
* - Urdu
- ur
* - Chinese (Mainland)
- zh_CN
* - Chinese (Taiwan)
- zh_TW
.. raw:: html
</details>
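Pass the chosen value to the ``YTMusic`` constructor, for example:

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic(language="de")  # metadata in results is returned in German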
Which values can I use for locations?
*************************************
Pick a value from the list below for your desired location and pass it using the ``location`` parameter.
.. raw:: html
<details>
<summary><a>Supported locations</a></summary>
.. container::
.. list-table::
* - Location
- Value
* - Algeria
- DZ
* - Argentina
- AR
* - Australia
- AU
* - Austria
- AT
* - Azerbaijan
- AZ
* - Bahrain
- BH
* - Bangladesh
- BD
* - Belarus
- BY
* - Belgium
- BE
* - Bolivia
- BO
* - Bosnia and Herzegovina
- BA
* - Brazil
- BR
* - Bulgaria
- BG
* - Cambodia
- KH
* - Canada
- CA
* - Chile
- CL
* - Colombia
- CO
* - Costa Rica
- CR
* - Croatia
- HR
* - Cyprus
- CY
* - Czechia
- CZ
* - Denmark
- DK
* - Dominican Republic
- DO
* - Ecuador
- EC
* - Egypt
- EG
* - El Salvador
- SV
* - Estonia
- EE
* - Finland
- FI
* - France
- FR
* - Georgia
- GE
* - Germany
- DE
* - Ghana
- GH
* - Greece
- GR
* - Guatemala
- GT
* - Honduras
- HN
* - Hong Kong
- HK
* - Hungary
- HU
* - Iceland
- IS
* - India
- IN
* - Indonesia
- ID
* - Iraq
- IQ
* - Ireland
- IE
* - Israel
- IL
* - Italy
- IT
* - Jamaica
- JM
* - Japan
- JP
* - Jordan
- JO
* - Kazakhstan
- KZ
* - Kenya
- KE
* - Kuwait
- KW
* - Laos
- LA
* - Latvia
- LV
* - Lebanon
- LB
* - Libya
- LY
* - Liechtenstein
- LI
* - Lithuania
- LT
* - Luxembourg
- LU
* - Malaysia
- MY
* - Malta
- MT
* - Mexico
- MX
* - Montenegro
- ME
* - Morocco
- MA
* - Nepal
- NP
* - Netherlands
- NL
* - New Zealand
- NZ
* - Nicaragua
- NI
* - Nigeria
- NG
* - North Macedonia
- MK
* - Norway
- NO
* - Oman
- OM
* - Pakistan
- PK
* - Panama
- PA
* - Papua New Guinea
- PG
* - Paraguay
- PY
* - Peru
- PE
* - Philippines
- PH
* - Poland
- PL
* - Portugal
- PT
* - Puerto Rico
- PR
* - Qatar
- QA
* - Romania
- RO
* - Russia
- RU
* - Saudi Arabia
- SA
* - Senegal
- SN
* - Serbia
- RS
* - Singapore
- SG
* - Slovakia
- SK
* - Slovenia
- SI
* - South Africa
- ZA
* - South Korea
- KR
* - Spain
- ES
* - Sri Lanka
- LK
* - Sweden
- SE
* - Switzerland
- CH
* - Taiwan
- TW
* - Tanzania
- TZ
* - Thailand
- TH
* - Tunisia
- TN
* - Turkey
- TR
* - Uganda
- UG
* - Ukraine
- UA
* - United Arab Emirates
- AE
* - United Kingdom
- GB
* - United States
- US
* - Uruguay
- UY
* - Venezuela
- VE
* - Vietnam
- VN
* - Yemen
- YE
* - Zimbabwe
- ZW
.. raw:: html
</details>
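For example:

.. code-block:: python

    from ytmusicapi import YTMusic

    yt = YTMusic(location="DE")  # e.g. charts and explore content for Germany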
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/docs/source/faq.rst | faq.rst |
Usage
=======
Unauthenticated
---------------
Unauthenticated requests for retrieving playlist content or searching:
.. code-block:: python
from ytmusicapi import YTMusic
ytmusic = YTMusic()
If an endpoint requires authentication you will receive an error:
``Please provide authentication before using this function``
Authenticated
-------------
For authenticated requests you need to set up your credentials first: :doc:`Setup <setup/index>`
After you have created the authentication JSON, you can instantiate the class:
.. code-block:: python
from ytmusicapi import YTMusic
ytmusic = YTMusic("oauth.json")
With the :code:`ytmusic` instance you can now perform authenticated requests:
.. code-block:: python
playlistId = ytmusic.create_playlist("test", "test description")
search_results = ytmusic.search("Oasis Wonderwall")
ytmusic.add_playlist_items(playlistId, [search_results[0]['videoId']])
Brand accounts
##############
To send requests as a brand account, there is no need to change authentication credentials.
Simply provide the ID of the brand account when instantiating YTMusic.
You can get the ID from https://myaccount.google.com/ after selecting your brand account
(https://myaccount.google.com/b/21_digit_number).
Example:
.. code-block:: python
from ytmusicapi import YTMusic
ytmusic = YTMusic("oauth.json", "101234161234936123473")
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/docs/source/usage.rst | usage.rst |
ytmusicapi: Unofficial API for YouTube Music
==================================================
The purpose of this library is to automate interactions with `YouTube Music <https://music.youtube.com/>`_,
such as retrieving your library content, managing playlists and uploading songs.
To achieve this, it emulates web requests that would occur if you performed the same actions in your web browser.
**This project is not supported nor endorsed by Google**
.. include:: ../../README.rst
:start-after: features
:end-before: end-features
To **get started**, read the :doc:`setup instructions <setup/index>`.
For a **complete documentation** of available functions, see the :doc:`Reference <reference>`.
Contents
--------
.. toctree::
:hidden:
Home <self>
.. toctree::
setup/index
usage
reference
faq
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/docs/source/index.rst | index.rst |
Reference
==================
Reference for the YTMusic class.
.. currentmodule:: ytmusicapi
.. autoclass:: YTMusic
.. automethod:: YTMusic.__init__
Setup
-----
See also the :doc:`Setup <setup/index>` page
.. autofunction:: setup
.. autofunction:: setup_oauth
Search
------
.. automethod:: YTMusic.search
.. automethod:: YTMusic.get_search_suggestions
Browsing
--------
.. automethod:: YTMusic.get_home
.. automethod:: YTMusic.get_artist
.. automethod:: YTMusic.get_artist_albums
.. automethod:: YTMusic.get_album
.. automethod:: YTMusic.get_album_browse_id
.. automethod:: YTMusic.get_user
.. automethod:: YTMusic.get_user_playlists
.. automethod:: YTMusic.get_song
.. automethod:: YTMusic.get_song_related
.. automethod:: YTMusic.get_lyrics
.. automethod:: YTMusic.get_tasteprofile
.. automethod:: YTMusic.set_tasteprofile
Explore
--------
.. automethod:: YTMusic.get_mood_categories
.. automethod:: YTMusic.get_mood_playlists
.. automethod:: YTMusic.get_charts
Watch
--------
.. automethod:: YTMusic.get_watch_playlist
Library
-------
.. automethod:: YTMusic.get_library_playlists
.. automethod:: YTMusic.get_library_songs
.. automethod:: YTMusic.get_library_albums
.. automethod:: YTMusic.get_library_artists
.. automethod:: YTMusic.get_library_subscriptions
.. automethod:: YTMusic.get_liked_songs
.. automethod:: YTMusic.get_history
.. automethod:: YTMusic.add_history_item
.. automethod:: YTMusic.remove_history_items
.. automethod:: YTMusic.rate_song
.. automethod:: YTMusic.edit_song_library_status
.. automethod:: YTMusic.rate_playlist
.. automethod:: YTMusic.subscribe_artists
.. automethod:: YTMusic.unsubscribe_artists
Playlists
---------
.. automethod:: YTMusic.get_playlist
.. automethod:: YTMusic.create_playlist
.. automethod:: YTMusic.edit_playlist
.. automethod:: YTMusic.delete_playlist
.. automethod:: YTMusic.add_playlist_items
.. automethod:: YTMusic.remove_playlist_items
Uploads
-------
.. automethod:: YTMusic.get_library_upload_songs
.. automethod:: YTMusic.get_library_upload_artists
.. automethod:: YTMusic.get_library_upload_albums
.. automethod:: YTMusic.get_library_upload_artist
.. automethod:: YTMusic.get_library_upload_album
.. automethod:: YTMusic.upload_song
.. automethod:: YTMusic.delete_upload_entity
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/docs/source/reference.rst | reference.rst |
Browser authentication
======================
This method of authentication emulates your browser session by reusing its request headers.
Follow the instructions to have your browser's YouTube Music session request headers parsed
to a ``ytmusicapi`` configuration file.
Copy authentication headers
---------------------------
To run authenticated requests, set it up by first copying your request headers from an authenticated POST request in your browser.
To do so, follow these steps:
- Open a new tab
- Open the developer tools (Ctrl-Shift-I) and select the "Network" tab
- Go to https://music.youtube.com and ensure you are logged in
- Find an authenticated POST request. The simplest way is to filter by ``/browse`` using the search bar of the developer tools.
If you don't see the request, try scrolling down a bit or clicking on the library button in the top bar.
.. raw:: html
<details open>
<summary><a>Firefox (recommended)</a></summary>
.. container::
- Verify that the request looks like this: **Status** 200, **Method** POST, **Domain** music.youtube.com, **File** ``browse?...``
- Copy the request headers (right click > copy > copy request headers)
.. raw:: html
</details>
.. raw:: html
<details>
<summary><a>Chromium (Chrome/Edge)</a></summary>
.. container::
- Verify that the request looks like this: **Status** 200, **Name** ``browse?...``
- Click on the Name of any matching request. In the "Headers" tab, scroll to the section "Request headers" and copy everything starting from "accept: \*/\*" to the end of the section
.. raw:: html
</details><br>
Using the headers in your project
---------------------------------
To set up your project, open a console and call
.. code-block:: bash
ytmusicapi browser
Follow the instructions and paste the request headers to the terminal input.
If you don't want terminal interaction in your project, you can pass the request headers with the ``headers_raw`` parameter:
.. code-block:: python
import ytmusicapi
ytmusicapi.setup(filepath="browser.json", headers_raw="<headers copied above>")
The function returns a JSON string with the credentials needed for :doc:`../usage`. Alternatively, if you passed the filepath parameter as described above,
a file called ``browser.json`` will be created in the current directory, which you can pass to ``YTMusic()`` for authentication.
These credentials remain valid as long as your YTMusic browser session is valid (about 2 years unless you log out).
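A minimal sketch of both variants described above:

.. code-block:: python

    import ytmusicapi
    from ytmusicapi import YTMusic

    auth = ytmusicapi.setup(filepath="browser.json", headers_raw="<headers copied above>")

    yt = YTMusic("browser.json")  # use the file written by setup ...
    yt = YTMusic(auth)            # ... or pass the returned JSON string directly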
.. raw:: html
<details>
<summary><a>MacOS special pasting instructions</a></summary>
.. container::
- MacOS terminal application can only accept 1024 characters pasted to std input. To paste in terminal, a small utility called pbpaste must be used.
- In terminal just prefix the command used to run the script you created above with
``pbpaste |``
- This will pipe the contents of the clipboard into the script just as if you had pasted it from the edit menu.
.. raw:: html
</details><br>
Manual file creation
--------------------
Alternatively, you can create your own file ``browser.json`` and paste the cookie:
.. literalinclude:: headers_auth.json.example
:language: JSON
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/docs/source/setup/browser.rst | browser.rst |
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
# Hello and thanks for using ytmusicapi! Please fill out the issue template below. Issues not following this template will be closed without comment. If you have a question only, please use GitHub discussions or gitter (linked in README).
~~~~~~~~~~~~~
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Additional context**
Add any other context about the problem here, such as a code sample.
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/.github/ISSUE_TEMPLATE/bug_report.md | bug_report.md |
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
# Hello and thanks for using ytmusicapi! Please fill out the issue template below. Issues not following this template will be closed without comment. If you have a question only, please use GitHub discussions or gitter (linked in README).
~~~~~~~~~~~~~
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
| ytmusicapi | /ytmusicapi-1.2.1.tar.gz/ytmusicapi-1.2.1/.github/ISSUE_TEMPLATE/feature_request.md | feature_request.md |
======
YTool
======
A simple tool to set values in yaml files preserving format and comments.
This command line tool is based on `ruamel.yaml <https://pypi.org/project/ruamel.yaml>`__
^^^^^^^^^^^^^^^^^^^^^^^^
Install:
^^^^^^^^^^^^^^^^^^^^^^^^
Ytool can be installed simply by running:
.. code-block:: bash
$ pip install ytool
^^^^^^^^^^^^^^^^^^^^^^^^
Example:
^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
$ ytool -f some.yaml -s some.string.key value -d float_key 9.9 -i int.key.path 10
^^^^^^^^^^^^^^^^^^^^^^^^
Help:
^^^^^^^^^^^^^^^^^^^^^^^^
.. code-block:: bash
$ usage: ytool [-h] --file FILE [--set-string path value] [--set-int path value]
[--set-float path value] [--output OUTPUT] [--edit-file]
[--verbose]
Set values in yaml file preserving format and comments.
optional arguments:
-h, --help show this help message and exit
--file FILE, -f FILE Name of the chart file to change
--set-string path value, -s path value
Set string value for path
--set-int path value, -i path value
Set integer value for path
--set-float path value, -d path value
Set float value for path
--output OUTPUT, -o OUTPUT
Name of output file
--edit-file, -e Edit input file directly
--verbose, -v Print debug information to stdout
---------------
What is Codacy?
---------------
`Codacy <https://www.codacy.com/>`__ is an Automated Code Review Tool
that monitors your technical debt, helps you improve your code quality,
teaches best practices to your developers, and helps you save time in
Code Reviews.
^^^^^^^^^^^^^^^^^^^^^^^^
Among Codacy’s features:
^^^^^^^^^^^^^^^^^^^^^^^^
- Identify new Static Analysis issues
- Commit and Pull Request Analysis with GitHub, BitBucket/Stash, GitLab
(and also direct git repositories)
- Auto-comments on Commits and Pull Requests
- Integrations with Slack, HipChat, Jira, YouTrack
- Track issues in Code Style, Security, Error Proneness, Performance,
Unused Code and other categories
Codacy also helps keep track of Code Coverage, Code Duplication, and
Code Complexity.
Codacy supports PHP, Python, Ruby, Java, JavaScript, and Scala, among
others.
^^^^^^^^^^^^^^^^^^^^
Free for Open Source
^^^^^^^^^^^^^^^^^^^^
Codacy is free for Open Source projects.
| ytool | /ytool-0.4.2.tar.gz/ytool-0.4.2/README.rst | README.rst |
ytools
======
Command-line tool and ``python`` module for ...
- validating ``json`` and ``yaml`` files against a ``json-schema`` in
``json`` or ``yaml`` format
- selectively dumping nodes from ``yaml`` (or ``json``) documents in
``yaml`` or ``json`` format.
Features
--------
- Output ``yaml`` as ``json`` or ``python``
- Output ``json`` as ``yaml`` or ``python`` (provided that there are no
duplicate mapping entry in the ``json`` source)
- Extract particular nodes from ``yaml`` and ``json`` files.
- If ``yaml`` is used as output format (default) the output is a
valid ``yaml`` document.
- Validate ``yaml`` and ``json`` documents.
- The ``json-schema`` can be provided in ``yaml`` format as well,
which improves readability and writability.
- Preserve order of mapping-keys in ``yaml`` and ``json`` output.
- Multi-document support
- Multiple input files
- ... as well as multiple ``yaml`` documents within a file
- ... and a combination of both
Installation
``pip install ytools``
Description
| For selecting nodes, ``ytools`` uses ``jsonpath_ng.ext``.
| The syntax is documented at
https://pypi.python.org/pypi/jsonpath-ng/1.4.2.
| By default (if no path is provided), complete input documents are
dumped in ``yaml`` format (path defaults to ``'$'``).
| This can be used to get ``yaml`` output for ``json`` documents or vice
versa:
- ``python ytools.py input.json``
... for converting json to yaml, or ...
- ``python ytools.py input.yaml -f json``
... for the opposite direction.
| Additionally, ``yaml`` and ``json`` documents can be validated against
a ``json-schema`` which may be provided in ``yaml`` or ``json``
format.
| ``schema.yaml`` is a sample for ``json-schema`` in ``yaml`` format.
``ytools -h``
::
$ ytools -h
Usage: /usr/local/bin/ytools [OPTION] -p JSONPATH_EXPRESSION FILE...
Dumps data from json (or yaml) documents in yaml format. Command line wrapper
for jsonpath-ng.
Options:
-h, --help show this help message and exit
-p PATH, --json-path=PATH
Syntax for jsonpath expression:
https://pypi.python.org/pypi/jsonpath-ng/1.4.2
-f OUTPUTFORMAT, --output-format=OUTPUTFORMAT
Output format. Can be "yaml", "json" or "python".
[default: yaml]
-y YAML_OPTIONS, --yaml-options=YAML_OPTIONS
kwargs for yaml.dump (pyYaml) as yaml.mapping (for
experts). [default: '{explicit_start: True,
explicit_end: True, allow_unicode: True}']
-j JSON_OPTIONS, --json-options=JSON_OPTIONS
kwargs for json.dumps as yaml.mapping (for experts).
[default: '{indent: 2, encoding: utf-8}']
-v SCHEMA, --validate=SCHEMA
Validate documents against json-schema
--encoding=ENCODING Set encoding of input documents (if different from
utf-8)
Command Line Samples
--------------------
The samples are based on the following data.
Sample Data
~~~~~~~~~~~
Input Data
^^^^^^^^^^
``input.yaml``:
.. code:: yaml
documents:
- title: Some document title
sections:
- title: Some section title
description: Some section description
text: Some text for some section
chapters:
- title: Some chapter title
description: Some chapter description
text: The text of some chapter
- title: Some other chapter title
description:
- descriptionparagraph1: Some description for other chapter
- descriptionparagraph2: Some description for other chapter
text: The text of some other chapter
- title: Some other section title
description: Some other section description
text: Some text for some other section
chapters:
- title: About encoding
description: "Some German: äöü,ÄÖÜ,ß"
Schema for validating ``input.yaml``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
``schema.yaml``
.. code:: yaml
$schema: "http://json-schema.org/schema#"
definitions:
chapter:
type: object
properties:
title: {type: string}
description:
oneOf: [{type: string}, {type: array}]
text: {type: string}
additionalProperties: false
required: [title, description]
chapters:
type: array
items: {$ref: "#/definitions/chapter"}
additionalItems: false
section:
type: object
properties:
title: {type: string}
description: {type: string}
text: {type: string}
chapters: {$ref: "#/definitions/chapters"}
additionalProperties: false
required: [title, description]
sections:
type: array
items: {$ref: "#/definitions/section"}
additionalItems: false
document:
type: object
properties:
title: {type: string}
description: {type: string}
sections: {$ref: "#/definitions/sections"}
additionalProperties: false
required: [title, description]
documents:
type: array
items: {$ref: "#/definitions/document"}
additionalItems: false
type: object
properties:
documents: {$ref: "#/definitions/documents"}
additionalProperties: false
Outputs
~~~~~~~
When not providing a jsonpath expression
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If you don't provide a jsonpath expression using the ``-p`` option,
ytools uses ``'$'`` as the default and therefore dumps the complete input:
::
🎼 ytools input.yaml
---
documents:
- title: Some document title
description: The document's description
sections:
- title: Some section title
description: Some section description
text: Some text for some section
chapters:
- {title: Some chapter title, description: Some chapter description, text: The
text of some chapter}
- title: Some other chapter title
description:
- {descriptionparagraph1: Some description for other chapter}
- {descriptionparagraph2: Some description for other chapter}
text: The text of some other chapter
- title: Some other section title
description: Some other section description
text: Some text for some other section
chapters:
- {title: About encoding, description: 'Some German: äöü,ÄÖÜ,ß'}
...
Output format ``yaml`` (default)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
With the ``yaml`` output format (the default), each match is output as a
separate ``yaml`` document, so the overall output remains valid
``yaml``:
::
🎼 ytools -p '$..chapters[*].description' input.yaml
--- Some chapter description
...
---
- {descriptionparagraph1: Some description for other chapter}
- {descriptionparagraph2: Some description for other chapter}
...
--- 'Some German: äöü,ÄÖÜ,ß'
...
If you want different behavior you can set ``explicit_start`` and/or
``explicit_end`` to ``False``. In this case the output will no longer be
guaranteed to be valid ``yaml``:
::
🎼 ytools -p '$..chapters[*].description' input.yaml --yaml-options='{explicit_start: False, explicit_end: False}'
Some chapter description
...
- {descriptionparagraph1: Some description for other chapter}
- {descriptionparagraph2: Some description for other chapter}
'Some German: äöü,ÄÖÜ,ß'
Other output formats
^^^^^^^^^^^^^^^^^^^^
Unfortunately, when using the ``json`` or ``python`` output format the
same selection can't produce valid ``json`` or ``python`` output. That's
because neither json nor python supports the concept of (multiple)
documents:
::
🎼 ytools -p '$..chapters[*].description' input.yaml -f json --json-options='{indent: 4}'
"Some chapter description"
[
{
"descriptionparagraph1": "Some description for other chapter"
},
{
"descriptionparagraph2": "Some description for other chapter"
}
]
"Some German: \u00e4\u00f6\u00fc,\u00c4\u00d6\u00dc,\u00df"
That's definitely not valid json.
Neither is the following valid python:
::
🎼 ytools -p '$..chapters[*].description' input.yaml -f python
Some chapter description
[{'descriptionparagraph1': 'Some description for other chapter'}, {'descriptionparagraph2': 'Some description for other chapter'}]
Some German: äöü,ÄÖÜ,ß
So **if you ever want to process the output automatically please stick
to ``yaml``**.
Python Samples
--------------
Sample Data
~~~~~~~~~~~
Input Data
^^^^^^^^^^
``test/sampledata.yaml``
.. code:: yaml
name: 'my_name'
date: '2017-10-01T10:55:00Z'
metrics:
percentage:
value: 87
trend: stable
Schema for Validating
^^^^^^^^^^^^^^^^^^^^^
``test/sampleschema.yaml``
.. code:: yaml
type: object
properties:
name: { type: string }
date: { type: string, format: date-time }
metrics:
type: object
properties:
percentage:
type: object
properties:
value:
type: number
minimum: 0
maximum: 100
trend: { type: string, enum: [down, stable, up] }
additionalProperties: false
required: [value, trend]
additionalProperties: false
required: [percentage]
additionalProperties: false
required: [name, date, metrics]
Validation
~~~~~~~~~~
.. code:: python
ytools.validate("test/sampleschema.yaml", ["test/sampledata.yaml"])
... will not output anything because of successful validation. Play
around if you want to see failing validation - it's quite easy to make
it fail ;-)
Dumping
~~~~~~~
.. code:: python
ytools.dump("test/sampledata.yaml", "$.metrics", yaml_options="default_flow_style: false")
... will output ...
::
---
percentage:
value: 87
trend: stable
...
| ytools | /ytools-1.0.0.tar.gz/ytools-1.0.0/README.rst | README.rst |
########
ytools3
########
.. start short_desc
**Library for validating `yaml` files against a schema and selectively dumping nodes from `yaml` (or `json`) documents in `yaml` or `json` format.**
.. end short_desc
This is a port of ``ytools`` ( https://github.com/yaccob/ytools ), which was Python 2 only, to Python 3.
.. start shields
.. list-table::
:stub-columns: 1
:widths: 10 90
* - Docs
- |docs| |docs_check|
* - Tests
- |travis| |actions_windows| |actions_macos| |coveralls| |codefactor|
* - PyPI
- |pypi-version| |supported-versions| |supported-implementations| |wheel|
* - Activity
- |commits-latest| |commits-since| |maintained|
* - Other
- |license| |language| |requires| |pre_commit|
.. |docs| image:: https://img.shields.io/readthedocs/ytools3/latest?logo=read-the-docs
:target: https://ytools3.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. |docs_check| image:: https://github.com/domdfcoding/ytools3/workflows/Docs%20Check/badge.svg
:target: https://github.com/domdfcoding/ytools3/actions?query=workflow%3A%22Docs+Check%22
:alt: Docs Check Status
.. |travis| image:: https://img.shields.io/travis/com/domdfcoding/ytools3/master?logo=travis
:target: https://travis-ci.com/domdfcoding/ytools3
:alt: Travis Build Status
.. |actions_windows| image:: https://github.com/domdfcoding/ytools3/workflows/Windows%20Tests/badge.svg
:target: https://github.com/domdfcoding/ytools3/actions?query=workflow%3A%22Windows+Tests%22
:alt: Windows Tests Status
.. |actions_macos| image:: https://github.com/domdfcoding/ytools3/workflows/macOS%20Tests/badge.svg
:target: https://github.com/domdfcoding/ytools3/actions?query=workflow%3A%22macOS+Tests%22
:alt: macOS Tests Status
.. |requires| image:: https://requires.io/github/domdfcoding/ytools3/requirements.svg?branch=master
:target: https://requires.io/github/domdfcoding/ytools3/requirements/?branch=master
:alt: Requirements Status
.. |coveralls| image:: https://img.shields.io/coveralls/github/domdfcoding/ytools3/master?logo=coveralls
:target: https://coveralls.io/github/domdfcoding/ytools3?branch=master
:alt: Coverage
.. |codefactor| image:: https://img.shields.io/codefactor/grade/github/domdfcoding/ytools3?logo=codefactor
:target: https://www.codefactor.io/repository/github/domdfcoding/ytools3
:alt: CodeFactor Grade
.. |pypi-version| image:: https://img.shields.io/pypi/v/ytools3
:target: https://pypi.org/project/ytools3/
:alt: PyPI - Package Version
.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/ytools3?logo=python&logoColor=white
:target: https://pypi.org/project/ytools3/
:alt: PyPI - Supported Python Versions
.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/ytools3
:target: https://pypi.org/project/ytools3/
:alt: PyPI - Supported Implementations
.. |wheel| image:: https://img.shields.io/pypi/wheel/ytools3
:target: https://pypi.org/project/ytools3/
:alt: PyPI - Wheel
.. |license| image:: https://img.shields.io/github/license/domdfcoding/ytools3
:target: https://github.com/domdfcoding/ytools3/blob/master/LICENSE
:alt: License
.. |language| image:: https://img.shields.io/github/languages/top/domdfcoding/ytools3
:alt: GitHub top language
.. |commits-since| image:: https://img.shields.io/github/commits-since/domdfcoding/ytools3/v3.0.1
:target: https://github.com/domdfcoding/ytools3/pulse
:alt: GitHub commits since tagged version
.. |commits-latest| image:: https://img.shields.io/github/last-commit/domdfcoding/ytools3
:target: https://github.com/domdfcoding/ytools3/commit/master
:alt: GitHub last commit
.. |maintained| image:: https://img.shields.io/maintenance/yes/2020
:alt: Maintenance
.. |pre_commit| image:: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
:target: https://github.com/pre-commit/pre-commit
:alt: pre-commit
.. end shields
Features
---------
* Output ``yaml`` as ``json`` or ``python``
* Output ``json`` as ``yaml`` or ``python`` (provided that there are no duplicate mapping entries in the ``json`` source)
* Extract particular nodes from ``yaml`` and ``json`` files.
+ If ``yaml`` is used as output format (default) the output is a valid ``yaml`` document.
* Validate ``yaml`` and ``json`` documents.
+ The ``json-schema`` can be provided in ``yaml`` format as well, which improves readability and writability.
* Preserve order of mapping-keys in ``yaml`` and ``json`` output.
* Multi-document support
+ Multiple input files
- ... as well as multiple ``yaml`` documents within a file
- ... and a combination of both
Installation
--------------
.. start installation
``ytools3`` can be installed from PyPI.
To install with ``pip``:
.. code-block:: bash
$ python -m pip install ytools3
.. end installation
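
Usage
-------

A minimal usage sketch of the module-level API (the ``validate`` and ``dump`` helpers defined in ``ytools/__init__.py``); the file names below are placeholders:

.. code-block:: python

    from ytools import dump, validate

    # Validate one or more yaml/json documents against a yaml- or json-formatted schema.
    # Raises jsonschema.exceptions.ValidationError for the first failing document.
    validate("schema.yaml", ["input.yaml"])

    # Print the nodes selected by a jsonpath expression (as yaml documents by default).
    dump("input.yaml", path="$..chapters[*].title")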
| ytools3 | /ytools3-3.0.1.tar.gz/ytools3-3.0.1/README.rst | README.rst |
# stdlib
import argparse
import pathlib
import sys
# 3rd party
import jsonschema # type: ignore
import yaml
from yaml import scanner
# this package
from ytools import __version__, dump, optiondefaults, validate
def main(argv):
parser = argparse.ArgumentParser(
prog=argv[0],
description="""\
Dumps data from json (or yaml) documents in yaml format.
Command line wrapper for jsonpath-ng.
Find more information at https://github.com/yaccob/ytools
""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# usage='Usage: %prog [OPTION] -p JSONPATH_EXPRESSION FILE...',
# version='%s %s' % ("%prog", __version__),
parser.add_argument("datafile", type=pathlib.Path, nargs=1, metavar="FILE")
parser.add_argument(
"-p",
"--json-path",
dest="path",
default='$',
help="Syntax for jsonpath expression: https://pypi.python.org/pypi/jsonpath-ng/1.4.2"
)
parser.add_argument(
"-f",
"--output-format",
metavar="OUTPUTFORMAT",
dest="format",
choices={"yaml", "json", "python"},
default="yaml",
help='Output format. Can be "yaml", "json" or "python". ' # [default: %default]
)
parser.add_argument(
"-y",
"--yaml-options",
dest="yaml_options",
default=optiondefaults["yaml"],
help="kwargs for yaml.dump (pyYaml) as yaml.mapping (for experts). " # [default: '%default']
)
parser.add_argument(
"-j",
"--json-options",
dest="json_options",
default=optiondefaults["json"],
help="kwargs for json.dumps as yaml.mapping (for experts). " # [default: '%default']
)
parser.add_argument(
"-v", "--validate", metavar="SCHEMA", dest="schema", help="Validate documents against json-schema"
)
parser.add_argument(
"--encoding",
dest="encoding",
default="utf-8",
help="Set encoding of input documents (if different from utf-8)"
)
args = parser.parse_args(argv[1:])
    try:
        if args.schema:
            validate(args.schema, args.datafile, encoding=args.encoding)
        # "schema" and "datafile" are not parameters of dump(), so drop them from the
        # parsed namespace before forwarding the remaining options as keyword arguments.
        args.__dict__.pop("schema", None)
        for datafile in args.__dict__.pop("datafile"):
            dump(datafile, **args.__dict__)
except jsonschema.exceptions.ValidationError as e:
sys.stderr.write(f"{e.path}: {e.message}\n")
sys.stderr.write(f" document-path: {list(e.absolute_path)}\n")
sys.stderr.write(f" schema-path: {list(e.absolute_schema_path)}\n")
sys.exit(1)
except yaml.scanner.ScannerError as e:
sys.stderr.write(f"{e}\n")
if __name__ == "__main__":
main(sys.argv) | ytools3 | /ytools3-3.0.1.tar.gz/ytools3-3.0.1/ytools/__main__.py | __main__.py |
#
# Copyright (c) 2020 Dominic Davis-Foster <[email protected]>
# Copyright (c) Jakob Stemberger <[email protected]>
# Apache 2.0 Licensed
# See LICENSE for more information
#
# stdlib
import collections
import json
import pathlib
from typing import Callable, Dict, Iterable, Union
# 3rd party
import jsonschema # type: ignore
import yaml
from jsonpath_ng import ext as jsonpath # type: ignore
from typing_extensions import TypedDict
from yaml import constructor, resolver
__author__: str = "Dominic Davis-Foster"
__copyright__: str = "2020 Dominic Davis-Foster"
__license__: str = "Apache2.0"
__version__: str = "3.0.1"
__email__: str = "[email protected]"
__all__ = ["validate", "dump", "optiondefaults", "__version__"]
optiondefaults = {
"yaml": "{explicit_start: True, explicit_end: True, allow_unicode: True}",
"json": "{indent: 2, encoding: utf-8}",
"python": "{}"
}
def dict_constructor(loader, node) -> Dict:
return dict(loader.construct_pairs(node))
def orderedDict_constructor(loader, node, deep=False):
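    # PyYAML constructor written as a generator (two-step construction): the empty
    # mapping is yielded first so recursive or anchored nodes can refer to it, and
    # it is then populated from the node's key/value pairs.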
data = collections.OrderedDict()
yield data
if isinstance(node, yaml.MappingNode):
loader.flatten_mapping(node)
data.update(collections.OrderedDict(loader.construct_pairs(node, deep)))
class Encoder(TypedDict):
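    """Per-output-format configuration: the dump callable, its default kwargs (as a yaml mapping string), and the PyYAML mapping constructor used when loading the input."""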
dumper: Callable
kwargs: str
yaml_constructor: Callable
def validate(
schemafile: Union[str, pathlib.Path],
datafiles: Iterable[Union[str, pathlib.Path]],
encoding: str = "utf-8",
) -> None:
"""
Validate the given datafiles using a schema
:param schemafile: The ``json`` or ``yaml`` formatted schema to validate with
:param datafiles: An iterable of ``json`` or ``yaml`` files to validate
:param encoding: Encoding to open the files with.
:type encoding: str
"""
schemafile = pathlib.Path(schemafile)
schema = yaml.safe_load(schemafile.read_text(encoding=encoding))
for filename in datafiles:
for document in yaml.load_all(
pathlib.Path(filename).read_text(encoding=encoding),
Loader=yaml.FullLoader,
):
try:
jsonschema.validate(document, schema, format_checker=jsonschema.FormatChecker())
except jsonschema.exceptions.ValidationError as e:
e.filename = str(filename)
raise e
def dump(
datafile: Union[str, pathlib.Path],
path: str = '$',
format: str = "yaml", # pylint: disable=redefined-builtin
yaml_options: str = optiondefaults["yaml"],
json_options: str = optiondefaults["json"],
encoding: str = "utf-8",
) -> None:
"""
    Dump the nodes selected by a jsonpath expression from a ``yaml`` or ``json`` document.

    :param datafile: The ``yaml`` or ``json`` file to read
    :param path: jsonpath expression selecting the nodes to dump
    :param format: Output format; one of ``"yaml"``, ``"json"`` or ``"python"``
    :param yaml_options: kwargs for ``yaml.dump``, given as a yaml mapping
    :param json_options: kwargs for ``json.dumps``, given as a yaml mapping
    :param encoding: Encoding to open the files with.
    :type encoding: str
"""
encoders: Dict[str, Encoder] = {
"yaml": {
"dumper": yaml.dump,
"kwargs": yaml_options,
"yaml_constructor": orderedDict_constructor,
},
"json": {
"dumper": json.dumps,
"kwargs": json_options,
"yaml_constructor": orderedDict_constructor,
},
"python": {
"dumper": (lambda x, **kwargs: x),
"kwargs": "{}",
"yaml_constructor": dict_constructor,
},
}
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, encoders[format]["yaml_constructor"])
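    # json has no native timestamp type, so when emitting json load yaml timestamps
    # as plain strings rather than datetime objects (which json.dumps cannot encode).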
if format == "json":
yaml.add_constructor("tag:yaml.org,2002:timestamp", yaml.constructor.SafeConstructor.construct_yaml_str)
yaml.add_representer(collections.OrderedDict, lambda dumper, data: dumper.represent_dict(data.items()))
documents = yaml.load_all(
pathlib.Path(datafile).read_text(encoding=encoding),
Loader=yaml.FullLoader,
)
formatoptions = dict(yaml.safe_load(optiondefaults[format]), **yaml.safe_load(encoders[format]["kwargs"]))
for document in documents:
for match in jsonpath.parse(path).find(document):
print(encoders[format]["dumper"](match.value, **formatoptions)) | ytools3 | /ytools3-3.0.1.tar.gz/ytools3-3.0.1/ytools/__init__.py | __init__.py |
<p align="center">
<img src="docs/_static/logo/medium.png">
</p>
[](https://ytopt.readthedocs.io/en/latest/?badge=latest)
# What is Ytopt?
TODO
# Documentation
Ytopt documentation is on : [ReadTheDocs](https://ytopt.readthedocs.io)
# Directory structure
```
search/
directory for search applications
```
# Install instructions
```
git clone https://github.com/ytopt-team/ytopt.git
cd ytopt/
pip install -e .
```
# How do I learn more?
* Documentation: https://ytopt.readthedocs.io
* GitHub repository: https://github.com/ytopt-team/ytopt
# Who is responsible?
The core Ytopt team is at Argonne National Laboratory:
TODO
# How can I participate?
TODO
# Acknowledgements
TODO
# Copyright and license
TBD | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/README.md | README.md |
import tensorflow as tf
import json
import math
from mpi4py import MPI
from ppo.baselines.common import set_global_seeds
from ppo.baselines import bench
import os.path as osp
from ppo.baselines import logger
from ppo.gym_ytopt.envs import YtoptEnvParallel
from ppo.gym_ytopt.agent import pposgd_simple, lstm_policy, mlp_policy
import ppo.baselines.common.tf_util as U
from math import inf
class Train:
"""Training class for ppo. Sequential evaluation of workers.
Args:
problem (Problem): a problem from one of the benchmarks
policy_fn (func): return a policy instance
num_episodes_per_batch (int): number of episodes per batch of update (SYNC)
num_episodes (int): total number of episodes to sample
seed (int): seed of the current agent
"""
def __init__(self, problem, rank_workers, policy_fn,
num_episodes=math.inf,
seed=2018,
comm=None,
tags=None,
max_time=inf):
self.rank_workers = rank_workers
self.problem = problem
self.seed = seed
self.policy_fn = policy_fn
self.num_episodes_per_batch = len(rank_workers)
self.num_episodes = num_episodes
self.max_time = max_time
self.comm = MPI.COMM_WORLD if comm is None else comm
self.tags = tags
def train(self):
num_episodes = self.num_episodes
seed = self.seed
policy_fn = self.policy_fn
rank = self.comm.Get_rank()
sess = U.single_threaded_session()
sess.__enter__()
if rank == 0:
logger.configure()
else:
logger.configure(format_strs=[])
workerseed = seed + 10000 * self.comm.Get_rank() if seed is not None else None
set_global_seeds(workerseed)
# MAKE ENV_NAS
# num_episodes = 1000
episode_length = len(self.problem.space.keys())
timesteps_per_actorbatch = episode_length * self.num_episodes_per_batch
# num_timesteps = timesteps_per_actorbatch * num_episodes
env = YtoptEnvParallel(rank_workers=self.rank_workers, problem=self.problem)
print(f'[A, r={rank}] learning')
pposgd_simple.learn(env, policy_fn,
# max_timesteps=int(num_timesteps),
max_seconds=self.max_time,
timesteps_per_actorbatch=timesteps_per_actorbatch,
clip_param=0.2,
entcoeff=0.01,
optim_epochs=4,
optim_stepsize=1e-3,
optim_batchsize=15,
gamma=0.99,
lam=0.95,
schedule='constant',
comm=self.comm,
tags=self.tags,
rank_workers=self.rank_workers
)
env.close() | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/agent/train.py | train.py |
from ppo.baselines.common import Dataset, explained_variance, fmt_row, zipsame
from ppo.baselines import logger
import ppo.baselines.common.tf_util as U
import tensorflow as tf, numpy as np
import time
from ppo.baselines.common.mpi_adam import MpiAdam
from ppo.baselines.common.mpi_moments import mpi_moments
from mpi4py import MPI
from collections import deque
from pprint import pformat
def traj_segment_generator(pi, env, horizon, stochastic):
t = 0
ac = env.action_space.sample() # not used, just so we have the datatype
new = True # marks if we're on first timestep of an episode
ob = env.reset()
cur_ep_ret = 0 # return in current episode
cur_ep_len = 0 # len of current episode
ep_rets = [] # returns of completed episodes in this segment
ep_lens = [] # lengths of ...
ts_i2n_ep = {}
# Initialize history arrays
obs = np.array([ob for _ in range(horizon)])
rews = np.zeros(horizon, 'float32')
vpreds = np.zeros(horizon, 'float32')
news = np.zeros(horizon, 'int32')
acs = np.array([ac for _ in range(horizon)])
prevacs = acs.copy()
num_evals = 0
while True:
prevac = ac
ac, vpred = pi.act(stochastic, ob)
# Slight weirdness here because we need value function at time T
# before returning segment [0, T-1] so we get the correct
# terminal value
if t > 0 and t % horizon == 0:
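            # Drain every outstanding evaluation before yielding the batch: rewards
            # arrive asynchronously from the workers, so each one is written back into
            # the segment (and its episode's return) at the timestep index it was issued for.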
while num_evals > 0:
data, rew = env.get_reward_ready()
index = data['index']
rews[index] = rew
num_evals -= 1
ep_rets[ts_i2n_ep[index]-1] += rew
ts_i2n_ep = {}
data = {"ob" : obs, "rew" : rews, "vpred" : vpreds, "new" : news,
"ac" : acs, "prevac" : prevacs, "nextvpred": vpred * (1 - new),
"ep_rets" : ep_rets, "ep_lens" : ep_lens}
yield data
# Be careful!!! if you change the downstream algorithm to aggregate
# several of these batches, then be sure to do a deepcopy
ep_rets = []
ep_lens = []
i = t % horizon
obs[i] = ob
vpreds[i] = vpred
news[i] = new
acs[i] = ac
prevacs[i] = prevac
# observ, reward, episode_over, meta -> {}
ob, rew, new, _ = env.step(ac, i)
rews[i] = rew
        cur_ep_ret += rew if rew is not None else 0
cur_ep_len += 1
if new:
num_evals += 1
ts_i2n_ep[i] = num_evals
ep_rets.append(cur_ep_ret)
ep_lens.append(cur_ep_len)
cur_ep_ret = 0
cur_ep_len = 0
ob = env.reset()
t += 1
def add_vtarg_and_adv(seg, gamma, lam):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
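    A sketch of the recursion implemented below (``new[t+1]`` marks an episode
    boundary and zeroes the bootstrap term):

        delta_t = r_t + gamma * V(s_{t+1}) * (1 - new_{t+1}) - V(s_t)
        A_t     = delta_t + gamma * lam * (1 - new_{t+1}) * A_{t+1}

    The TD(lambda) value target is then vtarg_t = A_t + V(s_t).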
"""
new = np.append(seg["new"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpred = np.append(seg["vpred"], seg["nextvpred"])
T = len(seg["rew"])
seg["adv"] = gaelam = np.empty(T, 'float32')
rew = seg["rew"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1-new[t+1]
delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamret"] = seg["adv"] + seg["vpred"]
def learn(env, policy_fn, *,
timesteps_per_actorbatch, # timesteps per actor per update
clip_param, entcoeff, # clipping parameter epsilon, entropy coeff
optim_epochs, optim_stepsize, optim_batchsize,# optimization hypers
gamma, lam, # advantage estimation
comm, tags, rank_workers,
max_timesteps=0, max_episodes=0, max_iters=0, max_seconds=0, # time constraint
callback=None, # you can do anything in the callback, since it takes locals(), globals()
adam_epsilon=1e-5,
schedule='constant', # annealing for stepsize parameters (epsilon and adam)
):
# Setup losses and stuff
# ----------------------------------------
ob_space = env.observation_space
ac_space = env.action_space
pi = policy_fn("pi", ob_space, ac_space, comm=comm) # Construct network for new policy
oldpi = policy_fn("oldpi", ob_space, ac_space, comm=comm) # Network for old policy
atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)
ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return
lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule
    clip_param = clip_param * lrmult # Annealed clipping parameter epsilon
ob = U.get_placeholder_cached(name="ob")
ac = pi.pdtype.sample_placeholder([None])
kloldnew = oldpi.pd.kl(pi.pd)
ent = pi.pd.entropy()
meankl = tf.reduce_mean(kloldnew)
meanent = tf.reduce_mean(ent)
pol_entpen = (-entcoeff) * meanent
ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # pnew / pold
surr1 = ratio * atarg # surrogate from conservative policy iteration
surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg #
pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2)) # PPO's pessimistic surrogate (L^CLIP)
vf_loss = tf.reduce_mean(tf.square(pi.vpred - ret))
total_loss = pol_surr + pol_entpen + vf_loss
losses = [pol_surr, pol_entpen, vf_loss, meankl, meanent]
loss_names = ["pol_surr", "pol_entpen", "vf_loss", "kl", "ent"]
var_list = pi.get_trainable_variables()
lossandgrad = U.function([ob, ac, atarg, ret, lrmult], losses + [U.flatgrad(total_loss, var_list)])
adam = MpiAdam(var_list, epsilon=adam_epsilon, comm=comm)
assign_old_eq_new = U.function([],[], updates=[tf.assign(oldv, newv)
for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])
compute_losses = U.function([ob, ac, atarg, ret, lrmult], losses)
U.initialize()
adam.sync()
# Prepare for rollouts
# ----------------------------------------
seg_gen = traj_segment_generator(pi, env, timesteps_per_actorbatch, stochastic=True)
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=100) # rolling buffer for episode lengths
rewbuffer = deque(maxlen=100) # rolling buffer for episode rewards
cond = sum([max_iters>0, max_timesteps>0, max_episodes>0, max_seconds>0])
assert cond==1, f"Only one time constraint permitted: cond={cond}, max_iters={max_iters}, max_timesteps={max_timesteps}, max_episodes={max_episodes}, max_seconds={max_seconds}"
while True:
if callback: callback(locals(), globals())
if max_timesteps and timesteps_so_far >= max_timesteps:
break
elif max_episodes and episodes_so_far >= max_episodes:
break
elif max_iters and iters_so_far >= max_iters:
break
elif max_seconds and time.time() - tstart >= max_seconds:
comm = MPI.COMM_WORLD
for dest in rank_workers:
print(f'~~~> dest: {dest}, rank: {comm.rank}')
comm.send(None, dest=dest, tag=tags.EXIT)
break
if schedule == 'constant':
cur_lrmult = 1.0
elif schedule == 'linear':
cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)
else:
raise NotImplementedError
logger.log("********** Iteration %i ************"%iters_so_far)
seg = seg_gen.__next__()
add_vtarg_and_adv(seg, gamma, lam)
# ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
        vpredbefore = seg["vpred"] # predicted value function before update
atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate
d = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=not pi.recurrent)
# optim_batchsize = optim_batchsize or ob.shape[0]
optim_batchsize = ob.shape[0]
if hasattr(pi, "ob_rms"): pi.ob_rms.update(ob) # update running mean/std for policy
assign_old_eq_new() # set old parameter values to new parameter values
logger.log("Optimizing...")
logger.log(fmt_row(13, loss_names))
# Here we do a bunch of optimization epochs over the data
for _ in range(optim_epochs):
losses = [] # list of tuples, each of which gives the loss for a minibatch
for batch in d.iterate_once(optim_batchsize):
*newlosses, g = lossandgrad(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
adam.update(g, optim_stepsize * cur_lrmult)
losses.append(newlosses)
logger.log(fmt_row(13, np.mean(losses, axis=0)))
logger.log("Evaluating losses...")
losses = []
for batch in d.iterate_once(optim_batchsize):
newlosses = compute_losses(batch["ob"], batch["ac"], batch["atarg"], batch["vtarg"], cur_lrmult)
losses.append(newlosses)
meanlosses,_,_ = mpi_moments(losses, axis=0, comm=comm)
logger.log(fmt_row(13, meanlosses))
for (lossval, name) in zipsame(meanlosses, loss_names):
logger.record_tabular("loss_"+name, lossval)
logger.record_tabular("ev_tdlam_before", explained_variance(vpredbefore, tdlamret))
lrlocal = (seg["ep_lens"], seg["ep_rets"]) # local values
listoflrpairs = comm.allgather(lrlocal) # list of tuples
lens, rews = map(flatten_lists, zip(*listoflrpairs))
lenbuffer.extend(lens)
rewbuffer.extend(rews)
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
iters_so_far += 1
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if comm.Get_rank()==0:
logger.dump_tabular()
return pi
def flatten_lists(listoflists):
return [el for list_ in listoflists for el in list_] | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/agent/pposgd_simple.py | pposgd_simple.py |
from ppo.baselines.common.mpi_running_mean_std import RunningMeanStd
import ppo.baselines.common.tf_util as U
import tensorflow as tf
import gym
from ppo.baselines.common.distributions import make_pdtype
class LstmPolicy(object):
recurrent = False
def __init__(self, name, *args, **kwargs):
self.comm = kwargs.get('comm')
kwargs.pop('comm', None)
with tf.variable_scope(name):
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, num_units, gaussian_fixed_var=True):
assert isinstance(ob_space, gym.spaces.Box)
num_hid_layers = 1
hid_size = 10
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
full_path_is_done = tf.get_variable("full_path_is_done", dtype=tf.bool,
initializer=True, trainable=False)
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape, comm=self.comm)
with tf.variable_scope('vf'):
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
lstm = tf.contrib.rnn.LSTMCell(
num_units=num_units,
name=f'rnn_cell',
initializer=U.normc_initializer(1.0))
init_lstm_state = lstm.zero_state(1, dtype=tf.float32)
v_lstm_state = tf.get_variable("v_lstm_state", dtype=tf.float32,
initializer=init_lstm_state, trainable=False)
ba_state = tf.get_variable("ba_state", dtype=tf.float32,
initializer=init_lstm_state, trainable=False)
assign_ba_state = tf.cond(full_path_is_done,
lambda: tf.assign(ba_state, v_lstm_state), # TRUE
lambda: tf.assign(ba_state, ba_state)) # FALSE
lstm_state = tf.cond(tf.equal(tf.shape(ob)[0], 1),
lambda: v_lstm_state,
lambda: ba_state)
assign_fpid = tf.assign(full_path_is_done, tf.math.greater(tf.shape(ob)[0], 1))
with tf.control_dependencies([assign_ba_state]):
last_out = tf.expand_dims(last_out, 0)
last_out, lstm_new_state = tf.nn.dynamic_rnn(lstm,
last_out,
initial_state=init_lstm_state,
dtype=tf.float32)
assign_new_state = tf.assign(v_lstm_state, lstm_new_state)
last_out = tf.squeeze(last_out, axis=[0])
with tf.control_dependencies([assign_new_state, assign_fpid]):
self.vpred = tf.layers.dense(last_out, 1, name='final', kernel_initializer=U.normc_initializer(1.0))[:,0]
with tf.variable_scope('pol'):
last_out = obz
lstm = tf.contrib.rnn.LSTMCell(
num_units=num_units,
name=f'rnn_cell',
initializer=U.normc_initializer(1.0),
state_is_tuple=False)
init_lstm_state = lstm.zero_state(1, dtype=tf.float32)
v_lstm_state = tf.get_variable("v_lstm_state", dtype=tf.float32,
initializer=init_lstm_state, trainable=False)
ba_state = tf.get_variable("ba_state", dtype=tf.float32,
initializer=init_lstm_state, trainable=False)
assign_ba_state = tf.cond(full_path_is_done,
lambda: tf.assign(ba_state, v_lstm_state), # TRUE
lambda: tf.assign(ba_state, ba_state)) # FALSE
lstm_state = tf.cond(tf.equal(tf.shape(ob)[0], 1),
lambda: v_lstm_state,
lambda: ba_state)
assign_fpid = tf.assign(full_path_is_done, tf.math.greater(tf.shape(ob)[0], 1))
with tf.control_dependencies([assign_ba_state]):
last_out = tf.expand_dims(last_out, 0)
last_out, lstm_new_state = tf.nn.dynamic_rnn(lstm,
last_out,
initial_state=lstm_state,
dtype=tf.float32)
assign_new_state = tf.assign(v_lstm_state, lstm_new_state)
last_out = tf.squeeze(last_out, axis=[0])
with tf.control_dependencies([assign_new_state, assign_fpid]):
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
mean = tf.layers.dense(last_out, pdtype.param_shape()[0]//2, name='final', kernel_initializer=U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
else:
pdparam = tf.layers.dense(last_out, pdtype.param_shape()[0], name='final', kernel_initializer=U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
def policy_fn(name, ob_space, ac_space, comm=None): #pylint: disable=W0613
return LstmPolicy(name=name, ob_space=ob_space, ac_space=ac_space, num_units=32, comm=comm) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/agent/lstm_policy.py | lstm_policy.py |
from ppo.baselines.common.mpi_running_mean_std import RunningMeanStd
import ppo.baselines.common.tf_util as U
import tensorflow as tf
import gym
from ppo.baselines.common.distributions import make_pdtype
class MlpPolicy(object):
recurrent = False
def __init__(self, name, *args, **kwargs):
with tf.variable_scope(name):
self._init(*args, **kwargs)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
assert isinstance(ob_space, gym.spaces.Box)
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
with tf.variable_scope("obfilter"):
self.ob_rms = RunningMeanStd(shape=ob_space.shape)
with tf.variable_scope('vf'):
obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name="fc%i"%(i+1), kernel_initializer=U.normc_initializer(1.0)))
self.vpred = tf.layers.dense(last_out, 1, name='final', kernel_initializer=U.normc_initializer(1.0))[:,0]
with tf.variable_scope('pol'):
last_out = obz
for i in range(num_hid_layers):
last_out = tf.nn.tanh(tf.layers.dense(last_out, hid_size, name='fc%i'%(i+1), kernel_initializer=U.normc_initializer(1.0)))
if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
mean = tf.layers.dense(last_out, pdtype.param_shape()[0]//2, name='final', kernel_initializer=U.normc_initializer(0.01))
logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
else:
pdparam = tf.layers.dense(last_out, pdtype.param_shape()[0], name='final', kernel_initializer=U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(pdparam)
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
def policy_fn(name, ob_space, ac_space): #pylint: disable=W0613
return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=10, num_hid_layers=1) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/agent/mlp_policy.py | mlp_policy.py |
import gym
import numpy as np
from gym import spaces
from mpi4py import MPI
import time
def enum(*sequential, **named):
"""Handy way to fake an enumerated type in Python
http://stackoverflow.com/questions/36932/how-can-i-represent-an-enum-in-python
"""
enums = dict(zip(sequential, range(len(sequential))), **named)
return type('Enum', (), enums)
# Define MPI message tags
tags = enum('READY', 'DONE', 'EXIT', 'START')
comm = MPI.COMM_WORLD # get MPI communicator object
rank = comm.rank # rank of this process
status = MPI.Status() # get MPI status object
class YtoptEnvParallel(gym.Env):
eval_counter = 0
start_time = time.time()
def __init__(self, rank_workers, problem):
self.rank_workers = rank_workers
self.problem = problem
self.observation_space = spaces.Box(low=-0, high=self.max_action_size, shape=(1,), dtype=np.float32)
self.action_space = spaces.Discrete(self.max_action_size)
self._state = np.array([1.])
self.action_buffer = []
self.num_timesteps = len(problem.params)
def step(self, action, index):
self.action_buffer.append(action)
if len(self.action_buffer) < self.num_timesteps:
terminal = False
reward = 0
self._state = np.array([float(action)])
return self._state, reward, terminal, {}
conv_action = self.index2tokens(self.action_buffer)
terminal = True
self.action_buffer = []
self._state = np.array([1.])
# EXECUTION
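        # MPI handshake: block until some worker announces READY, then send it the
        # point to evaluate with the START tag; the objective value is collected
        # later by get_reward_ready() when the worker replies with the DONE tag.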
data = comm.recv(source=MPI.ANY_SOURCE, tag=tags.READY, status=status)
source = status.Get_source()
print(f'[ENV] rank: {rank} found available worker -> rank={source}')
task = {}
task['x'] = conv_action
task['index'] = index
task['eval_counter'] = YtoptEnvParallel.eval_counter
task['rank_master'] = rank
elapsed_time = float(time.time() - YtoptEnvParallel.start_time)
task['start_time'] = elapsed_time
# print('Sending task {} to worker {}'.format (eval_counter, source))
print(f'[ENV] rank: {rank} send task to rank={source}')
comm.send(task, dest=source, tag=tags.START)
YtoptEnvParallel.eval_counter += 1
# ob, reward, terminal
# cost is minimization when ppo is maximization
return self._state, 0, terminal, {}
def get_reward_ready(self):
data = comm.recv(source=MPI.ANY_SOURCE, tag=tags.DONE, status=status)
return data, -data['cost']
def reset(self):
self.__init__(self.rank_workers, self.problem)
return self._state
def render(self, mode='human', close=False):
pass
@property
def max_action_size(self):
space = self.problem.space
mx = 0
for k in space:
mx = max(mx, len(space[k]))
return mx
def index2tokens(self, index_list):
token_list = []
space = self.problem.space
for i, k in enumerate(space.keys()):
index = index_list[i]
            # Map the agent's action (an index into the padded, fixed-size action space)
            # onto this parameter's actual choice list by rescaling and rounding.
            f_index = index / self.max_action_size  # fraction of the padded action space
            n_index = int(f_index * len(space[k]) + 0.5)  # nearest index in this parameter's list
            n_index = min(n_index, len(space[k]) - 1)  # guard against rounding past the last entry
            token = space[k][n_index]  # assuming that space[k] is a list
token_list.append(token)
return token_list | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/envs/ytopt_env_parallel.py | ytopt_env_parallel.py |
import gym
import numpy as np
from gym import spaces
class YtoptEnv(gym.Env):
eval_counter = 0
def __init__(self, evaluate, problem):
self.evaluate = evaluate
self.problem = problem
self.observation_space = spaces.Box(low=-0, high=self.max_action_size, shape=(1,), dtype=np.float32)
self.action_space = spaces.Discrete(self.max_action_size)
self._state = np.array([1.])
self.action_buffer = []
self.num_timesteps = len(problem.params)
def step(self, action, index):
self.action_buffer.append(action)
if len(self.action_buffer) < self.num_timesteps:
terminal = False
reward = 0
self._state = np.array([float(action)])
return self._state, reward, terminal, {}
conv_action = self.index2tokens(self.action_buffer)
terminal = True
self.action_buffer = []
self._state = np.array([1.])
# EXECUTION
result = self.evaluate(conv_action, YtoptEnv.eval_counter)
YtoptEnv.eval_counter += 1
# ob, reward, terminal
# cost is minimization when ppo is maximization
return self._state, -result['cost'], terminal, {}
def reset(self):
self.__init__(self.evaluate, self.problem)
return self._state
def render(self, mode='human', close=False):
pass
@property
def max_action_size(self):
space = self.problem.space
mx = 0
for k in space:
mx = max(mx, len(space[k]))
return mx
def index2tokens(self, index_list):
token_list = []
space = self.problem.space
for i, k in enumerate(space.keys()):
index = index_list[i]
            f_index = index / self.max_action_size  # fraction of the padded action space
            n_index = int(f_index * len(space[k]) + 0.5)  # nearest index in this parameter's list
            n_index = min(n_index, len(space[k]) - 1)  # guard against rounding past the last entry
            token = space[k][n_index]  # assuming that space[k] is a list
token_list.append(token)
return token_list | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/gym_ytopt/envs/ytopt_env.py | ytopt_env.py |
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
from collections import defaultdict
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(' ')
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=''):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
Logger.CURRENT.logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
Logger.CURRENT.logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
    """
    Write all of the diagnostics from the current iteration to the configured outputs.
    """
    Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
class ProfileKV:
"""
Usage:
with logger.ProfileKV("interesting_scope"):
code
"""
def __init__(self, n):
self.n = "wait_" + n
def __enter__(self):
self.t1 = time.time()
def __exit__(self ,type, value, traceback):
Logger.CURRENT.name2val[self.n] += time.time() - self.t1
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with ProfileKV(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
if val is None:
self.name2val[key] = None
return
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
self.name2cnt.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def configure(dir=None, format_strs=None):
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
log_suffix = ''
rank = 0
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ['PMI_RANK', 'OMPI_COMM_WORLD_RANK']:
if varname in os.environ:
rank = int(os.environ[varname])
if rank > 0:
log_suffix = "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv('OPENAI_LOG_FORMAT', 'stdout,log,csv').split(',')
else:
format_strs = os.getenv('OPENAI_LOG_FORMAT_MPI', 'log').split(',')
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s'%dir)
def _configure_default_logger():
format_strs = None
# keep the old default of only writing to stdout
if 'OPENAI_LOG_FORMAT' not in os.environ:
format_strs = ['stdout']
configure(format_strs=format_strs)
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv_mean("b", -22.5)
logkv_mean("b", -44.4)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see b = 33.3")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
# configure the default logger on import
_configure_default_logger()
if __name__ == "__main__":
_demo() | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/logger.py | logger.py |
import numpy as np
import matplotlib
matplotlib.use('TkAgg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
plt.rcParams['svg.fonttype'] = 'none'
from ppo.baselines.bench.monitor import load_results
X_TIMESTEPS = 'timesteps'
X_EPISODES = 'episodes'
X_WALLTIME = 'walltime_hrs'
Y_REWARD = 'reward'
Y_TIMESTEPS = 'timesteps'
POSSIBLE_X_AXES = [X_TIMESTEPS, X_EPISODES, X_WALLTIME]
EPISODES_WINDOW = 100
COLORS = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black', 'purple', 'pink',
'brown', 'orange', 'teal', 'coral', 'lightblue', 'lime', 'lavender', 'turquoise',
'darkgreen', 'tan', 'salmon', 'gold', 'lightpurple', 'darkred', 'darkblue']
def rolling_window(a, window):
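    # Build a zero-copy strided view of `a` whose last axis is a sliding window of
    # length `window` over the original last axis (one row per window position).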
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
def window_func(x, y, window, func):
yw = rolling_window(y, window)
yw_func = func(yw, axis=-1)
return x[window-1:], yw_func
def ts2xy(ts, xaxis, yaxis):
if xaxis == X_TIMESTEPS:
x = np.cumsum(ts.l.values)
elif xaxis == X_EPISODES:
x = np.arange(len(ts))
elif xaxis == X_WALLTIME:
x = ts.t.values / 3600.
else:
raise NotImplementedError
if yaxis == Y_REWARD:
y = ts.r.values
elif yaxis == Y_TIMESTEPS:
y = ts.l.values
else:
raise NotImplementedError
return x, y
def plot_curves(xy_list, xaxis, yaxis, title):
fig = plt.figure(figsize=(8,2))
maxx = max(xy[0][-1] for xy in xy_list)
minx = 0
for (i, (x, y)) in enumerate(xy_list):
color = COLORS[i]
plt.scatter(x, y, s=2)
x, y_mean = window_func(x, y, EPISODES_WINDOW, np.mean) #So returns average of last EPISODE_WINDOW episodes
plt.plot(x, y_mean, color=color)
plt.xlim(minx, maxx)
plt.title(title)
plt.xlabel(xaxis)
plt.ylabel(yaxis)
plt.tight_layout()
fig.canvas.mpl_connect('resize_event', lambda event: plt.tight_layout())
plt.grid(True)
def plot_results(dirs, num_timesteps, xaxis, yaxis, task_name):
tslist = []
for dir in dirs:
ts = load_results(dir)
ts = ts[ts.l.cumsum() <= num_timesteps]
tslist.append(ts)
xy_list = [ts2xy(ts, xaxis, yaxis) for ts in tslist]
plot_curves(xy_list, xaxis, yaxis, task_name)
# Example usage in jupyter-notebook
# from ppo.baselines import log_viewer
# %matplotlib inline
# log_viewer.plot_results(["./log"], 10e6, log_viewer.X_TIMESTEPS, log_viewer.Y_REWARD, "Breakout")
# Here ./log is a directory containing the monitor.csv files
def main():
import argparse
import os
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dirs', help='List of log directories', nargs = '*', default=['./log'])
parser.add_argument('--num_timesteps', type=int, default=int(10e6))
parser.add_argument('--xaxis', help='Variable on X-axis', default=X_TIMESTEPS)
parser.add_argument('--yaxis', help='Variable on Y-axis', default=Y_REWARD)
parser.add_argument('--task_name', help = 'Title of plot', default = 'Breakout')
args = parser.parse_args()
args.dirs = [os.path.abspath(dir) for dir in args.dirs]
plot_results(args.dirs, args.num_timesteps, args.xaxis, args.yaxis, args.task_name)
plt.show()
if __name__ == '__main__':
main() | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/results_plotter.py | results_plotter.py |
import sys
import multiprocessing
import os.path as osp
import gym
from collections import defaultdict
import tensorflow as tf
import numpy as np
from ppo.baselines.common.vec_env.vec_frame_stack import VecFrameStack
from ppo.baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env
from ppo.baselines.common.tf_util import get_session
from ppo.baselines import bench, logger
from importlib import import_module
from ppo.baselines.common.vec_env.vec_normalize import VecNormalize
from ppo.baselines.common import atari_wrappers, retro_wrappers
try:
from mpi4py import MPI
except ImportError:
MPI = None
try:
import pybullet_envs
except ImportError:
pybullet_envs = None
try:
import roboschool
except ImportError:
roboschool = None
_game_envs = defaultdict(set)
for env in gym.envs.registry.all():
# TODO: solve this with regexes
env_type = env._entry_point.split(':')[0].split('.')[-1]
_game_envs[env_type].add(env.id)
# reading benchmark names directly from retro requires
# importing retro here, and for some reason that crashes tensorflow
# in ubuntu
_game_envs['retro'] = {
'BubbleBobble-Nes',
'SuperMarioBros-Nes',
'TwinBee3PokoPokoDaimaou-Nes',
'SpaceHarrier-Nes',
'SonicTheHedgehog-Genesis',
'Vectorman-Genesis',
'FinalFight-Snes',
'SpaceInvaders-Snes',
}
def train(args, extra_args):
env_type, env_id = get_env_type(args.env)
print('env_type: {}'.format(env_type))
total_timesteps = int(args.num_timesteps)
seed = args.seed
learn = get_learn_function(args.alg)
alg_kwargs = get_learn_function_defaults(args.alg, env_type)
alg_kwargs.update(extra_args)
env = build_env(args)
if args.network:
alg_kwargs['network'] = args.network
else:
if alg_kwargs.get('network') is None:
alg_kwargs['network'] = get_default_network(env_type)
print('Training {} on {}:{} with arguments \n{}'.format(args.alg, env_type, env_id, alg_kwargs))
model = learn(
env=env,
seed=seed,
total_timesteps=total_timesteps,
**alg_kwargs
)
return model, env
def build_env(args):
ncpu = multiprocessing.cpu_count()
if sys.platform == 'darwin': ncpu //= 2
nenv = args.num_env or ncpu
alg = args.alg
rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = args.seed
env_type, env_id = get_env_type(args.env)
if env_type == 'atari':
if alg == 'acer':
env = make_vec_env(env_id, env_type, nenv, seed)
elif alg == 'deepq':
env = atari_wrappers.make_atari(env_id)
env.seed(seed)
env = bench.Monitor(env, logger.get_dir())
env = atari_wrappers.wrap_deepmind(env, frame_stack=True)
elif alg == 'trpo_mpi':
env = atari_wrappers.make_atari(env_id)
env.seed(seed)
env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
env = atari_wrappers.wrap_deepmind(env)
# TODO check if the second seeding is necessary, and eventually remove
env.seed(seed)
else:
frame_stack_size = 4
env = VecFrameStack(make_vec_env(env_id, env_type, nenv, seed), frame_stack_size)
elif env_type == 'retro':
import retro
gamestate = args.gamestate or retro.State.DEFAULT
env = retro_wrappers.make_retro(game=args.env, state=gamestate, max_episode_steps=10000,
use_restricted_actions=retro.Actions.DISCRETE)
env.seed(args.seed)
env = bench.Monitor(env, logger.get_dir())
env = retro_wrappers.wrap_deepmind_retro(env)
else:
get_session(tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale)
if env_type == 'mujoco':
env = VecNormalize(env)
return env
def get_env_type(env_id):
if env_id in _game_envs.keys():
env_type = env_id
env_id = [g for g in _game_envs[env_type]][0]
else:
env_type = None
for g, e in _game_envs.items():
if env_id in e:
env_type = g
break
assert env_type is not None, 'env_id {} is not recognized in env types {}'.format(env_id, _game_envs.keys())
return env_type, env_id
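# Example of what get_env_type returns, assuming the usual gym registry
# (env ids shown are illustrative):
#   get_env_type('PongNoFrameskip-v4')   # -> ('atari', 'PongNoFrameskip-v4')
#   get_env_type('Hopper-v2')            # -> ('mujoco', 'Hopper-v2')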
def get_default_network(env_type):
if env_type == 'atari':
return 'cnn'
else:
return 'mlp'
def get_alg_module(alg, submodule=None):
submodule = submodule or alg
try:
# first try to import the alg module from ppo.baselines
alg_module = import_module('.'.join(['ppo', 'baselines', alg, submodule]))
except ImportError:
# then from rl_algs
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
return alg_module
def get_learn_function(alg):
return get_alg_module(alg).learn
def get_learn_function_defaults(alg, env_type):
try:
alg_defaults = get_alg_module(alg, 'defaults')
kwargs = getattr(alg_defaults, env_type)()
except (ImportError, AttributeError):
kwargs = {}
return kwargs
def parse_cmdline_kwargs(args):
'''
convert a list of unparsed '--key=value' (or '--key value') command-line arguments to a dictionary, evaluating Python expressions when possible
'''
def parse(v):
assert isinstance(v, str)
try:
return eval(v)
except (NameError, SyntaxError):
return v
return {k: parse(v) for k,v in parse_unknown_args(args).items()}
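# Sketch of how unparsed command-line arguments become learner kwargs
# (flag names here are hypothetical examples, not options of this script):
#   parse_cmdline_kwargs(['--lr=3e-4', '--nsteps=2048', '--network=mlp'])
#   # -> {'lr': 0.0003, 'nsteps': 2048, 'network': 'mlp'}
# Values that eval() cannot interpret (e.g. 'mlp') are kept as plain strings.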
def main():
# configure logger, disable logging in child MPI processes (with rank > 0)
arg_parser = common_arg_parser()
args, unknown_args = arg_parser.parse_known_args()
extra_args = parse_cmdline_kwargs(unknown_args)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
rank = 0
logger.configure()
else:
logger.configure(format_strs=[])
rank = MPI.COMM_WORLD.Get_rank()
model, env = train(args, extra_args)
env.close()
if args.save_path is not None and rank == 0:
save_path = osp.expanduser(args.save_path)
model.save(save_path)
if args.play:
logger.log("Running trained model")
env = build_env(args)
obs = env.reset()
def initialize_placeholders(nlstm=128,**kwargs):
return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))
state, dones = initialize_placeholders(**extra_args)
while True:
actions, _, state, _ = model.step(obs,S=state, M=dones)
obs, _, done, _ = env.step(actions)
env.render()
done = done.any() if isinstance(done, np.ndarray) else done
if done:
obs = env.reset()
env.close()
if __name__ == '__main__':
main() | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/run.py | run.py |
import os
import numpy as np
import tensorflow as tf
from collections import deque
def sample(logits):
noise = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(noise)), 1)
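# Note (added for clarity): the argmax above is the Gumbel-max trick --
# adding -log(-log(U)) noise with U ~ Uniform(0, 1) to the logits and taking
# the argmax draws an exact sample from the categorical distribution
# softmax(logits).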
def cat_entropy(logits):
a0 = logits - tf.reduce_max(logits, 1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, 1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def conv(x, scope, *, nf, rf, stride, pad='VALID', init_scale=1.0, data_format='NHWC', one_dim_bias=False):
if data_format == 'NHWC':
channel_ax = 3
strides = [1, stride, stride, 1]
bshape = [1, 1, 1, nf]
elif data_format == 'NCHW':
channel_ax = 1
strides = [1, 1, stride, stride]
bshape = [1, nf, 1, 1]
else:
raise NotImplementedError
bias_var_shape = [nf] if one_dim_bias else [1, nf, 1, 1]
nin = x.get_shape()[channel_ax].value
wshape = [rf, rf, nin, nf]
with tf.variable_scope(scope):
w = tf.get_variable("w", wshape, initializer=ortho_init(init_scale))
b = tf.get_variable("b", bias_var_shape, initializer=tf.constant_initializer(0.0))
if not one_dim_bias and data_format == 'NHWC':
b = tf.reshape(b, bshape)
return tf.nn.conv2d(x, w, strides=strides, padding=pad, data_format=data_format) + b
def fc(x, scope, nh, *, init_scale=1.0, init_bias=0.0):
with tf.variable_scope(scope):
nin = x.get_shape()[1].value
w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(init_bias))
return tf.matmul(x, w)+b
def batch_to_seq(h, nbatch, nsteps, flat=False):
if flat:
h = tf.reshape(h, [nbatch, nsteps])
else:
h = tf.reshape(h, [nbatch, nsteps, -1])
return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=1), [-1])
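# Note (added for clarity): batch_to_seq turns a flat (nbatch*nsteps, ...) tensor
# into a list of nsteps per-timestep tensors of shape (nbatch, ...), which the
# LSTM loops below consume one step at a time; seq_to_batch is the inverse.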
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
u, s = tf.nn.moments(x, axes=axes, keep_dims=True)
x = (x-u)/tf.sqrt(s+e)
x = x*g+b
return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(_ln(c, gc, bc))
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
def conv_to_fc(x):
nh = np.prod([v.value for v in x.get_shape()[1:]])
x = tf.reshape(x, [-1, nh])
return x
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done) # fixed off by one bug
discounted.append(r)
return discounted[::-1]
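# Worked example for discount_with_dones (illustrative values): `dones` cuts
# the bootstrap at episode boundaries.
#   discount_with_dones([1., 1., 1.], [0, 0, 1], 0.99)
#   # -> [2.9701, 1.99, 1.0]   i.e. [1 + 0.99*(1 + 0.99*1), 1 + 0.99*1, 1]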
def find_trainable_variables(key):
return tf.trainable_variables(key)
def make_path(f):
return os.makedirs(f, exist_ok=True)
def constant(p):
return 1
def linear(p):
return 1-p
def middle_drop(p):
eps = 0.75
if 1-p<eps:
return eps*0.1
return 1-p
def double_linear_con(p):
p *= 2
eps = 0.125
if 1-p<eps:
return eps
return 1-p
def double_middle_drop(p):
eps1 = 0.75
eps2 = 0.25
if 1-p<eps1:
if 1-p<eps2:
return eps2*0.5
return eps1*0.1
return 1-p
schedules = {
'linear':linear,
'constant':constant,
'double_linear_con': double_linear_con,
'middle_drop': middle_drop,
'double_middle_drop': double_middle_drop
}
class Scheduler(object):
def __init__(self, v, nvalues, schedule):
self.n = 0.
self.v = v
self.nvalues = nvalues
self.schedule = schedules[schedule]
def value(self):
current_value = self.v*self.schedule(self.n/self.nvalues)
self.n += 1.
return current_value
def value_steps(self, steps):
return self.v*self.schedule(steps/self.nvalues)
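# Minimal sketch of how Scheduler is typically used for learning-rate decay
# (numbers are illustrative):
#   lr_schedule = Scheduler(v=7e-4, nvalues=1000000, schedule='linear')
#   lr = lr_schedule.value()   # ~7e-4 on the first call, decaying toward 0 as n -> nvalues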
class EpisodeStats:
def __init__(self, nsteps, nenvs):
self.episode_rewards = []
for i in range(nenvs):
self.episode_rewards.append([])
self.lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.nsteps = nsteps
self.nenvs = nenvs
def feed(self, rewards, masks):
rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
masks = np.reshape(masks, [self.nenvs, self.nsteps])
for i in range(0, self.nenvs):
for j in range(0, self.nsteps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
l = len(self.episode_rewards[i])
s = sum(self.episode_rewards[i])
self.lenbuffer.append(l)
self.rewbuffer.append(s)
self.episode_rewards[i] = []
def mean_length(self):
if self.lenbuffer:
return np.mean(self.lenbuffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
# For ACER
def get_by_index(x, idx):
assert(len(x.get_shape()) == 2)
assert(len(idx.get_shape()) == 1)
idx_flattened = tf.range(0, x.shape[0]) * x.shape[1] + idx
y = tf.gather(tf.reshape(x, [-1]), # flatten input
idx_flattened) # use flattened indices
return y
def check_shape(ts,shapes):
i = 0
for (t,shape) in zip(ts,shapes):
assert t.get_shape().as_list()==shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
i += 1
def avg_norm(t):
return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def gradient_add(g1, g2, param):
print([g1, g2, param.name])
assert (not (g1 is None and g2 is None)), param.name
if g1 is None:
return g2
elif g2 is None:
return g1
else:
return g1 + g2
def q_explained_variance(qpred, q):
_, vary = tf.nn.moments(q, axes=[0, 1])
_, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
check_shape([vary, varpred], [[]] * 2)
return 1.0 - (varpred / vary) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/a2c_utils.py | a2c_utils.py |
import re
import os.path as osp
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']
_BENCHMARKS = []
remove_version_re = re.compile(r'-v\d+$')
def register_benchmark(benchmark):
for b in _BENCHMARKS:
if b['name'] == benchmark['name']:
raise ValueError('Benchmark with name %s already registered!' % b['name'])
# automatically add a description if it is not present
if 'tasks' in benchmark:
for t in benchmark['tasks']:
if 'desc' not in t:
t['desc'] = remove_version_re.sub('', t['env_id'])
_BENCHMARKS.append(benchmark)
def list_benchmarks():
return [b['name'] for b in _BENCHMARKS]
def get_benchmark(benchmark_name):
for b in _BENCHMARKS:
if b['name'] == benchmark_name:
return b
raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
def get_task(benchmark, env_id):
"""Get a task by env_id. Return None if the benchmark doesn't have the env"""
return next(filter(lambda task: task['env_id'] == env_id, benchmark['tasks']), None)
def find_task_for_env_id_in_any_benchmark(env_id):
for bm in _BENCHMARKS:
for task in bm["tasks"]:
if task["env_id"] == env_id:
return bm, task
return None, None
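# Illustrative lookups against the registry populated below:
#   bm = get_benchmark('Mujoco1M')
#   task = get_task(bm, 'Hopper-v2')   # -> {'env_id': 'Hopper-v2', 'trials': 6, ...}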
_ATARI_SUFFIX = 'NoFrameskip-v4'
register_benchmark({
'name': 'Atari50M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 50M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(50e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari10M',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 6, 'num_timesteps': int(10e6)} for _game in _atari7]
})
register_benchmark({
'name': 'Atari1Hr',
'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_seconds': 60 * 60} for _game in _atari7]
})
register_benchmark({
'name': 'AtariExploration10M',
'description': '7 Atari games emphasizing exploration, with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atariexpl7]
})
# MuJoCo
_mujocosmall = [
'InvertedDoublePendulum-v2', 'InvertedPendulum-v2',
'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2',
'Reacher-v2', 'Swimmer-v2']
register_benchmark({
'name': 'Mujoco1M',
'description': 'Some small 2D MuJoCo tasks, run for 1M timesteps',
'tasks': [{'env_id': _envid, 'trials': 6, 'num_timesteps': int(1e6)} for _envid in _mujocosmall]
})
register_benchmark({
'name': 'MujocoWalkers',
'description': 'MuJoCo forward walkers, run for 8M, humanoid 100M',
'tasks': [
{'env_id': "Hopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Walker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "Humanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
]
})
# Bullet
_bulletsmall = [
'InvertedDoublePendulum', 'InvertedPendulum', 'HalfCheetah', 'Reacher', 'Walker2D', 'Hopper', 'Ant'
]
_bulletsmall = [e + 'BulletEnv-v0' for e in _bulletsmall]
register_benchmark({
'name': 'Bullet1M',
'description': '6 mujoco-like tasks from bullet, 1M steps',
'tasks': [{'env_id': e, 'trials': 6, 'num_timesteps': int(1e6)} for e in _bulletsmall]
})
# Roboschool
register_benchmark({
'name': 'Roboschool8M',
'description': 'Small 2D tasks, up to 30 minutes to complete on 8 cores',
'tasks': [
{'env_id': "RoboschoolReacher-v1", 'trials': 4, 'num_timesteps': 2 * 1000000},
{'env_id': "RoboschoolAnt-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHalfCheetah-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolHopper-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
{'env_id': "RoboschoolWalker2d-v1", 'trials': 4, 'num_timesteps': 8 * 1000000},
]
})
register_benchmark({
'name': 'RoboschoolHarder',
'description': 'Test your might!!! Up to 12 hours on 32 cores',
'tasks': [
{'env_id': "RoboschoolHumanoid-v1", 'trials': 4, 'num_timesteps': 100 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrun-v1", 'trials': 4, 'num_timesteps': 200 * 1000000},
{'env_id': "RoboschoolHumanoidFlagrunHarder-v1", 'trials': 4, 'num_timesteps': 400 * 1000000},
]
})
# Other
_atari50 = [ # actually 47
'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders', 'StarGunner',
'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture',
'VideoPinball', 'WizardOfWor', 'Zaxxon',
]
register_benchmark({
'name': 'Atari50_10M',
'description': '47 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
'tasks': [{'desc': _game, 'env_id': _game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': int(10e6)} for _game in _atari50]
})
# HER DDPG
register_benchmark({
'name': 'HerDdpg',
'description': 'Smoke-test only benchmark of HER',
'tasks': [{'trials': 1, 'env_id': 'FetchReach-v1'}]
}) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/bench/benchmarks.py | benchmarks.py |
__all__ = ['Monitor', 'get_monitor_files', 'load_results']
import gym
from gym.core import Wrapper
import time
from glob import glob
import csv
import os.path as osp
import json
import numpy as np
class Monitor(Wrapper):
EXT = "monitor.csv"
f = None
def __init__(self, env, filename, allow_early_resets=False, reset_keywords=(), info_keywords=()):
Wrapper.__init__(self, env=env)
self.tstart = time.time()
self.results_writer = ResultsWriter(
filename,
header={"t_start": time.time(), 'env_id' : env.spec and env.spec.id},
extra_keys=reset_keywords + info_keywords
)
self.reset_keywords = reset_keywords
self.info_keywords = info_keywords
self.allow_early_resets = allow_early_resets
self.rewards = None
self.needs_reset = True
self.episode_rewards = []
self.episode_lengths = []
self.episode_times = []
self.total_steps = 0
self.current_reset_info = {} # extra info about the current episode, that was passed in during reset()
def reset(self, **kwargs):
self.reset_state()
for k in self.reset_keywords:
v = kwargs.get(k)
if v is None:
raise ValueError('Expected you to pass kwarg %s into reset'%k)
self.current_reset_info[k] = v
return self.env.reset(**kwargs)
def reset_state(self):
if not self.allow_early_resets and not self.needs_reset:
raise RuntimeError("Tried to reset an environment before done. If you want to allow early resets, wrap your env with Monitor(env, path, allow_early_resets=True)")
self.rewards = []
self.needs_reset = False
def step(self, action):
if self.needs_reset:
raise RuntimeError("Tried to step environment that needs reset")
ob, rew, done, info = self.env.step(action)
self.update(ob, rew, done, info)
return (ob, rew, done, info)
def update(self, ob, rew, done, info):
self.rewards.append(rew)
if done:
self.needs_reset = True
eprew = sum(self.rewards)
eplen = len(self.rewards)
epinfo = {"r": round(eprew, 6), "l": eplen, "t": round(time.time() - self.tstart, 6)}
for k in self.info_keywords:
epinfo[k] = info[k]
self.episode_rewards.append(eprew)
self.episode_lengths.append(eplen)
self.episode_times.append(time.time() - self.tstart)
epinfo.update(self.current_reset_info)
self.results_writer.write_row(epinfo)
if isinstance(info, dict):
info['episode'] = epinfo
self.total_steps += 1
def close(self):
if self.f is not None:
self.f.close()
def get_total_steps(self):
return self.total_steps
def get_episode_rewards(self):
return self.episode_rewards
def get_episode_lengths(self):
return self.episode_lengths
def get_episode_times(self):
return self.episode_times
class LoadMonitorResultsError(Exception):
pass
class ResultsWriter(object):
def __init__(self, filename=None, header='', extra_keys=()):
self.extra_keys = extra_keys
if filename is None:
self.f = None
self.logger = None
else:
if not filename.endswith(Monitor.EXT):
if osp.isdir(filename):
filename = osp.join(filename, Monitor.EXT)
else:
filename = filename + "." + Monitor.EXT
self.f = open(filename, "wt")
if isinstance(header, dict):
header = '# {} \n'.format(json.dumps(header))
self.f.write(header)
self.logger = csv.DictWriter(self.f, fieldnames=('r', 'l', 't')+tuple(extra_keys))
self.logger.writeheader()
self.f.flush()
def write_row(self, epinfo):
if self.logger:
self.logger.writerow(epinfo)
self.f.flush()
def get_monitor_files(dir):
return glob(osp.join(dir, "*" + Monitor.EXT))
def load_results(dir):
import pandas
monitor_files = (
glob(osp.join(dir, "*monitor.json")) +
glob(osp.join(dir, "*monitor.csv"))) # get both csv and (old) json files
if not monitor_files:
raise LoadMonitorResultsError("no monitor files of the form *%s found in %s" % (Monitor.EXT, dir))
dfs = []
headers = []
for fname in monitor_files:
with open(fname, 'rt') as fh:
if fname.endswith('csv'):
firstline = fh.readline()
if not firstline:
continue
assert firstline[0] == '#'
header = json.loads(firstline[1:])
df = pandas.read_csv(fh, index_col=None)
headers.append(header)
elif fname.endswith('json'): # Deprecated json format
episodes = []
lines = fh.readlines()
header = json.loads(lines[0])
headers.append(header)
for line in lines[1:]:
episode = json.loads(line)
episodes.append(episode)
df = pandas.DataFrame(episodes)
else:
assert 0, 'unreachable'
df['t'] += header['t_start']
dfs.append(df)
df = pandas.concat(dfs)
df.sort_values('t', inplace=True)
df.reset_index(inplace=True)
df['t'] -= min(header['t_start'] for header in headers)
df.headers = headers # HACK to preserve backwards compatibility
return df
def test_monitor():
# local imports used only by this smoke test
import uuid
import os
import pandas
env = gym.make("CartPole-v1")
env.seed(0)
mon_file = "/tmp/baselines-test-%s.monitor.csv" % uuid.uuid4()
menv = Monitor(env, mon_file)
menv.reset()
for _ in range(1000):
_, _, done, _ = menv.step(0)
if done:
menv.reset()
f = open(mon_file, 'rt')
firstline = f.readline()
assert firstline.startswith('#')
metadata = json.loads(firstline[1:])
assert metadata['env_id'] == "CartPole-v1"
assert set(metadata.keys()) == {'env_id', 't_start'}, "Incorrect keys in monitor metadata"
last_logline = pandas.read_csv(f, index_col=None)
assert set(last_logline.keys()) == {'l', 't', 'r'}, "Incorrect keys in monitor logline"
f.close()
os.remove(mon_file) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/bench/monitor.py | monitor.py |
import gym
import numpy as np
import os
import pickle
import random
import tempfile
import zipfile
def zipsame(*seqs):
L = len(seqs[0])
assert all(len(seq) == L for seq in seqs[1:])
return zip(*seqs)
def unpack(seq, sizes):
"""
Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
None = just one bare element, not a list
Example:
unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
"""
seq = list(seq)
it = iter(seq)
assert sum(1 if s is None else s for s in sizes) == len(seq), "Trying to unpack %s into %s" % (seq, sizes)
for size in sizes:
if size is None:
yield it.__next__()
else:
li = []
for _ in range(size):
li.append(it.__next__())
yield li
class EzPickle(object):
"""Objects that are pickled and unpickled via their constructor
arguments.
Example usage:
class Dog(Animal, EzPickle):
def __init__(self, furcolor, tailkind="bushy"):
Animal.__init__()
EzPickle.__init__(furcolor, tailkind)
...
When this object is unpickled, a new Dog will be constructed by passing the provided
furcolor and tailkind into the constructor. However, philosophers are still not sure
whether it is still the same dog.
This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
and Atari.
"""
def __init__(self, *args, **kwargs):
self._ezpickle_args = args
self._ezpickle_kwargs = kwargs
def __getstate__(self):
return {"_ezpickle_args": self._ezpickle_args, "_ezpickle_kwargs": self._ezpickle_kwargs}
def __setstate__(self, d):
out = type(self)(*d["_ezpickle_args"], **d["_ezpickle_kwargs"])
self.__dict__.update(out.__dict__)
def set_global_seeds(i):
try:
from mpi4py import MPI
rank = MPI.COMM_WORLD.Get_rank()
except ImportError:
rank = 0
myseed = i + 1000 * rank if i is not None else None
try:
import tensorflow as tf
tf.set_random_seed(myseed)
except ImportError:
pass
np.random.seed(myseed)
random.seed(myseed)
def pretty_eta(seconds_left):
"""Print the number of seconds in human readable format.
Examples:
2 days
2 hours and 37 minutes
less than a minute
Parameters
---------
seconds_left: int
Number of seconds to be converted to the ETA
Returns
-------
eta: str
String representing the pretty ETA.
"""
minutes_left = seconds_left // 60
seconds_left %= 60
hours_left = minutes_left // 60
minutes_left %= 60
days_left = hours_left // 24
hours_left %= 24
def helper(cnt, name):
return "{} {}{}".format(str(cnt), name, ('s' if cnt > 1 else ''))
if days_left > 0:
msg = helper(days_left, 'day')
if hours_left > 0:
msg += ' and ' + helper(hours_left, 'hour')
return msg
if hours_left > 0:
msg = helper(hours_left, 'hour')
if minutes_left > 0:
msg += ' and ' + helper(minutes_left, 'minute')
return msg
if minutes_left > 0:
return helper(minutes_left, 'minute')
return 'less than a minute'
class RunningAvg(object):
def __init__(self, gamma, init_value=None):
"""Keep a running estimate of a quantity. This is a bit like mean
but more sensitive to recent changes.
Parameters
----------
gamma: float
Must be between 0 and 1, where 0 is the most sensitive to recent
changes.
init_value: float or None
Initial value of the estimate. If None, it will be set on the first update.
"""
self._value = init_value
self._gamma = gamma
def update(self, new_val):
"""Update the estimate.
Parameters
----------
new_val: float
new observed value of the estimated quantity.
"""
if self._value is None:
self._value = new_val
else:
self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val
def __float__(self):
"""Get the current estimate"""
return self._value
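# Minimal sketch of RunningAvg usage (values are illustrative):
#   avg = RunningAvg(gamma=0.9)
#   avg.update(1.0)    # first update just sets the estimate
#   avg.update(2.0)
#   float(avg)         # -> 0.9*1.0 + 0.1*2.0 = 1.1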
def boolean_flag(parser, name, default=False, help=None):
"""Add a boolean flag to argparse parser.
Parameters
----------
parser: argparse.Parser
parser to add the flag to
name: str
--<name> will enable the flag, while --no-<name> will disable it
default: bool or None
default value of the flag
help: str
help string for the flag
"""
dest = name.replace('-', '_')
parser.add_argument("--" + name, action="store_true", default=default, dest=dest, help=help)
parser.add_argument("--no-" + name, action="store_false", dest=dest)
def get_wrapper_by_name(env, classname):
"""Given an a gym environment possibly wrapped multiple times, returns a wrapper
of class named classname or raises ValueError if no such wrapper was applied
Parameters
----------
env: gym.Env of gym.Wrapper
gym environment
classname: str
name of the wrapper
Returns
-------
wrapper: gym.Wrapper
wrapper named classname
"""
currentenv = env
while True:
if classname == currentenv.class_name():
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
raise ValueError("Couldn't find wrapper named %s" % classname)
def relatively_safe_pickle_dump(obj, path, compression=False):
"""This is just like regular pickle dump, except from the fact that failure cases are
different:
- It's never possible that we end up with a pickle in corrupted state.
- If a there was a different file at the path, that file will remain unchanged in the
even of failure (provided that filesystem rename is atomic).
- it is sometimes possible that we end up with useless temp file which needs to be
deleted manually (it will be removed automatically on the next function call)
The indended use case is periodic checkpoints of experiment state, such that we never
corrupt previous checkpoints if the current one fails.
Parameters
----------
obj: object
object to pickle
path: str
path to the output file
compression: bool
if True, the pickle will be compressed
"""
temp_storage = path + ".relatively_safe"
if compression:
# Using gzip here would be simpler, but the size is limited to 2GB
with tempfile.NamedTemporaryFile() as uncompressed_file:
pickle.dump(obj, uncompressed_file)
uncompressed_file.file.flush()
with zipfile.ZipFile(temp_storage, "w", compression=zipfile.ZIP_DEFLATED) as myzip:
myzip.write(uncompressed_file.name, "data")
else:
with open(temp_storage, "wb") as f:
pickle.dump(obj, f)
os.rename(temp_storage, path)
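# Round-trip sketch, paired with pickle_load below (paths are hypothetical):
#   relatively_safe_pickle_dump(state, '/tmp/checkpoint.pkl', compression=True)
#   state = pickle_load('/tmp/checkpoint.pkl', compression=True)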
def pickle_load(path, compression=False):
"""Unpickle a possible compressed pickle.
Parameters
----------
path: str
path to the output file
compression: bool
if True, assumes the pickle was compressed when created and attempts decompression.
Returns
-------
obj: object
the unpickled object
"""
if compression:
with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
with myzip.open("data") as f:
return pickle.load(f)
else:
with open(path, "rb") as f:
return pickle.load(f) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/common/misc_util.py | misc_util.py |
import os
try:
from mpi4py import MPI
except ImportError:
MPI = None
import gym
from gym.wrappers import FlattenDictWrapper
from ppo.baselines import logger
from ppo.baselines.bench import Monitor
from ppo.baselines.common import set_global_seeds
from ppo.baselines.common.atari_wrappers import make_atari, wrap_deepmind
from ppo.baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from ppo.baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from ppo.baselines.common.retro_wrappers import RewardScaler
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
if wrapper_kwargs is None: wrapper_kwargs = {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
def make_env(rank): # pylint: disable=C0111
def _thunk():
env = make_atari(env_id) if env_type == 'atari' else gym.make(env_id)
env.seed(seed + 10000*mpi_rank + rank if seed is not None else None)
env = Monitor(env,
logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)),
allow_early_resets=True)
if env_type == 'atari': return wrap_deepmind(env, **wrapper_kwargs)
elif reward_scale != 1: return RewardScaler(env, reward_scale)
else: return env
return _thunk
set_global_seeds(seed)
if num_env > 1: return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
else: return DummyVecEnv([make_env(start_index)])
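# Sketch of building a vectorized env with this helper (arguments are
# illustrative):
#   venv = make_vec_env('PongNoFrameskip-v4', 'atari', num_env=8, seed=0)
#   obs = venv.reset()   # batched observations, one row per sub-environment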
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from ppo.baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env
def make_robotics_env(env_id, seed, rank=0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env
def arg_parser():
"""
Create an empty argparse.ArgumentParser.
"""
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def atari_arg_parser():
"""
Create an argparse.ArgumentParser for run_atari.py.
"""
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def mujoco_arg_parser():
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def common_arg_parser():
"""
Create an argparse.ArgumentParser with the common options used by run.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
parser.add_argument('--num_timesteps', type=float, default=1e6)
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--play', default=False, action='store_true')
return parser
def robotics_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser
def parse_unknown_args(args):
"""
Parse arguments not consumed by the arg parser into a dictionary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/common/cmd_util.py | cmd_util.py |
import numpy as np
class Dataset(object):
def __init__(self, data_map, deterministic=False, shuffle=True):
self.data_map = data_map
self.deterministic = deterministic
self.enable_shuffle = shuffle
self.n = next(iter(data_map.values())).shape[0]
self._next_id = 0
self.shuffle()
def shuffle(self):
if self.deterministic:
return
perm = np.arange(self.n)
np.random.shuffle(perm)
for key in self.data_map:
self.data_map[key] = self.data_map[key][perm]
self._next_id = 0
def next_batch(self, batch_size):
if self._next_id >= self.n and self.enable_shuffle:
self.shuffle()
cur_id = self._next_id
cur_batch_size = min(batch_size, self.n - self._next_id)
self._next_id += cur_batch_size
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][cur_id:cur_id+cur_batch_size]
return data_map
def iterate_once(self, batch_size):
if self.enable_shuffle: self.shuffle()
while self._next_id <= self.n - batch_size:
yield self.next_batch(batch_size)
self._next_id = 0
def subset(self, num_elements, deterministic=True):
data_map = dict()
for key in self.data_map:
data_map[key] = self.data_map[key][:num_elements]
return Dataset(data_map, deterministic)
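# Minimal sketch of Dataset usage (array shapes are illustrative):
#   d = Dataset({'ob': np.zeros((256, 4)), 'ac': np.zeros((256, 2))}, shuffle=True)
#   for batch in d.iterate_once(64):
#       pass   # batch['ob'].shape == (64, 4), batch['ac'].shape == (64, 2)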
def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True, include_final_partial_batch=True):
assert (num_batches is None) != (batch_size is None), 'Provide num_batches or batch_size, but not both'
arrays = tuple(map(np.asarray, arrays))
n = arrays[0].shape[0]
assert all(a.shape[0] == n for a in arrays[1:])
inds = np.arange(n)
if shuffle: np.random.shuffle(inds)
sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
for batch_inds in np.array_split(inds, sections):
if include_final_partial_batch or len(batch_inds) == batch_size:
yield tuple(a[batch_inds] for a in arrays) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/common/dataset.py | dataset.py |
from .running_stat import RunningStat
from collections import deque
import numpy as np
class Filter(object):
def __call__(self, x, update=True):
raise NotImplementedError
def reset(self):
pass
class IdentityFilter(Filter):
def __call__(self, x, update=True):
return x
class CompositionFilter(Filter):
def __init__(self, fs):
self.fs = fs
def __call__(self, x, update=True):
for f in self.fs:
x = f(x)
return x
def output_shape(self, input_space):
out = input_space.shape
for f in self.fs:
out = f.output_shape(out)
return out
class ZFilter(Filter):
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, shape, demean=True, destd=True, clip=10.0):
self.demean = demean
self.destd = destd
self.clip = clip
self.rs = RunningStat(shape)
def __call__(self, x, update=True):
if update: self.rs.push(x)
if self.demean:
x = x - self.rs.mean
if self.destd:
x = x / (self.rs.std+1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def output_shape(self, input_space):
return input_space.shape
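# Sketch of typical ZFilter usage for observation whitening (shape and values
# are illustrative):
#   obfilter = ZFilter(shape=(4,), clip=5.0)
#   normed = obfilter(np.array([1.0, -2.0, 0.5, 3.0]))  # pushes x into the running stats, then normalizes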
class AddClock(Filter):
def __init__(self):
self.count = 0
def reset(self):
self.count = 0
def __call__(self, x, update=True):
return np.append(x, self.count/100.0)
def output_shape(self, input_space):
return (input_space.shape[0]+1,)
class FlattenFilter(Filter):
def __call__(self, x, update=True):
return x.ravel()
def output_shape(self, input_space):
return (int(np.prod(input_space.shape)),)
class Ind2OneHotFilter(Filter):
def __init__(self, n):
self.n = n
def __call__(self, x, update=True):
out = np.zeros(self.n)
out[x] = 1
return out
def output_shape(self, input_space):
return (input_space.n,)
class DivFilter(Filter):
def __init__(self, divisor):
self.divisor = divisor
def __call__(self, x, update=True):
return x / self.divisor
def output_shape(self, input_space):
return input_space.shape
class StackFilter(Filter):
def __init__(self, length):
self.stack = deque(maxlen=length)
def reset(self):
self.stack.clear()
def __call__(self, x, update=True):
self.stack.append(x)
while len(self.stack) < self.stack.maxlen:
self.stack.append(x)
return np.concatenate(self.stack, axis=-1)
def output_shape(self, input_space):
return input_space.shape[:-1] + (input_space.shape[-1] * self.stack.maxlen,) | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/common/filters.py | filters.py |
import tensorflow as tf
from ppo.baselines.common import tf_util
from ppo.baselines.a2c_utils import fc
from ppo.baselines.common.distributions import make_pdtype
from ppo.baselines.common.input import observation_placeholder, encode_observation
from ppo.baselines.common.tf_util import adjust_shape
from ppo.baselines.common.mpi_running_mean_std import RunningMeanStd
from ppo.baselines.common.models import get_network_builder
import gym
class PolicyWithValue(object):
"""
Encapsulates fields and methods for RL policy and value function estimation with shared parameters
"""
def __init__(self, env, observations, latent, estimate_q=False, vf_latent=None, sess=None, **tensors):
"""
Parameters:
----------
env RL environment
observations tensorflow placeholder in which the observations will be fed
latent latent state from which policy distribution parameters should be inferred
vf_latent latent state from which value function should be inferred (if None, then latent is used)
sess tensorflow session to run calculations in (if None, default session is used)
**tensors tensorflow tensors for additional attributes such as state or mask
"""
self.X = observations
self.state = tf.constant([])
self.initial_state = None
self.__dict__.update(tensors)
vf_latent = vf_latent if vf_latent is not None else latent
vf_latent = tf.layers.flatten(vf_latent)
latent = tf.layers.flatten(latent)
# Based on the action space, will select what probability distribution type
self.pdtype = make_pdtype(env.action_space)
self.pd, self.pi = self.pdtype.pdfromlatent(latent, init_scale=0.01)
# Take an action
self.action = self.pd.sample()
# Calculate the neg log of our probability
self.neglogp = self.pd.neglogp(self.action)
self.sess = sess
if estimate_q:
assert isinstance(env.action_space, gym.spaces.Discrete)
self.q = fc(vf_latent, 'q', env.action_space.n)
self.vf = self.q
else:
self.vf = fc(vf_latent, 'vf', 1)
self.vf = self.vf[:,0]
def _evaluate(self, variables, observation, **extra_feed):
sess = self.sess or tf.get_default_session()
feed_dict = {self.X: adjust_shape(self.X, observation)}
for inpt_name, data in extra_feed.items():
if inpt_name in self.__dict__.keys():
inpt = self.__dict__[inpt_name]
if isinstance(inpt, tf.Tensor) and inpt._op.type == 'Placeholder':
feed_dict[inpt] = adjust_shape(inpt, data)
return sess.run(variables, feed_dict)
def step(self, observation, **extra_feed):
"""
Compute next action(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
(action, value estimate, next state, negative log likelihood of the action under current policy parameters) tuple
"""
a, v, state, neglogp = self._evaluate([self.action, self.vf, self.state, self.neglogp], observation, **extra_feed)
if state.size == 0:
state = None
return a, v, state, neglogp
def value(self, ob, *args, **kwargs):
"""
Compute value estimate(s) given the observation(s)
Parameters:
----------
observation observation data (either single or a batch)
**extra_feed additional data such as state or mask (names of the arguments should match the ones in constructor, see __init__)
Returns:
-------
value estimate
"""
return self._evaluate(self.vf, ob, *args, **kwargs)
def save(self, save_path):
tf_util.save_state(save_path, sess=self.sess)
def load(self, load_path):
tf_util.load_state(load_path, sess=self.sess)
def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
if isinstance(policy_network, str):
network_type = policy_network
policy_network = get_network_builder(network_type)(**policy_kwargs)
def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
ob_space = env.observation_space
X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)
extra_tensors = {}
if normalize_observations and X.dtype == tf.float32:
encoded_x, rms = _normalize_clip_observation(X)
extra_tensors['rms'] = rms
else:
encoded_x = X
encoded_x = encode_observation(ob_space, encoded_x)
with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
policy_latent = policy_network(encoded_x)
if isinstance(policy_latent, tuple):
policy_latent, recurrent_tensors = policy_latent
if recurrent_tensors is not None:
# recurrent architecture, need a few more steps
nenv = nbatch // nsteps
assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
extra_tensors.update(recurrent_tensors)
_v_net = value_network
if _v_net is None or _v_net == 'shared':
vf_latent = policy_latent
else:
if _v_net == 'copy':
_v_net = policy_network
else:
assert callable(_v_net)
with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
# TODO recurrent architectures are not supported with value_network=copy yet
vf_latent = _v_net(encoded_x)
policy = PolicyWithValue(
env=env,
observations=X,
latent=policy_latent,
vf_latent=vf_latent,
sess=sess,
estimate_q=estimate_q,
**extra_tensors
)
return policy
return policy_fn
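# Minimal sketch of how the learners consume build_policy (env/session handling
# is simplified and illustrative):
#   policy_fn = build_policy(env, 'mlp', value_network='copy')
#   policy = policy_fn(nbatch=2048, nsteps=128, sess=sess)
#   actions, values, states, neglogps = policy.step(obs)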
def _normalize_clip_observation(x, clip_range=[-5.0, 5.0]):
rms = RunningMeanStd(shape=x.shape[1:])
norm_x = tf.clip_by_value((x - rms.mean) / rms.std, min(clip_range), max(clip_range))
return norm_x, rms | ytopt | /ytopt-0.0.1.tar.gz/ytopt-0.0.1/ppo/baselines/common/policies.py | policies.py |
import tensorflow as tf
import numpy as np
import ppo.baselines.common.tf_util as U
from ppo.baselines.a2c_utils import fc
from tensorflow.python.ops import math_ops
class Pd(object):
"""
A particular probability distribution
"""
def flatparam(self):
raise NotImplementedError
def mode(self):
raise NotImplementedError
def neglogp(self, x):
# Usually it's easier to define the negative logprob
raise NotImplementedError
def kl(self, other):
raise NotImplementedError
def entropy(self):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def logp(self, x):
return - self.neglogp(x)
def get_shape(self):
return self.flatparam().shape
@property
def shape(self):
return self.get_shape()
def __getitem__(self, idx):
return self.__class__(self.flatparam()[idx])
class PdType(object):
"""
Parametrized family of probability distributions
"""
def pdclass(self):
raise NotImplementedError
def pdfromflat(self, flat):
return self.pdclass()(flat)
def pdfromlatent(self, latent_vector):
raise NotImplementedError
def param_shape(self):
raise NotImplementedError
def sample_shape(self):
raise NotImplementedError
def sample_dtype(self):
raise NotImplementedError
def param_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=tf.float32, shape=prepend_shape+self.param_shape(), name=name)
def sample_placeholder(self, prepend_shape, name=None):
return tf.placeholder(dtype=self.sample_dtype(), shape=prepend_shape+self.sample_shape(), name=name)
def __eq__(self, other):
return (type(self) == type(other)) and (self.__dict__ == other.__dict__)
class CategoricalPdType(PdType):
def __init__(self, ncat):
self.ncat = ncat
def pdclass(self):
return CategoricalPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
pdparam = fc(latent_vector, 'pi', self.ncat, init_scale=init_scale, init_bias=init_bias)
return self.pdfromflat(pdparam), pdparam
def param_shape(self):
return [self.ncat]
def sample_shape(self):
return []
def sample_dtype(self):
return tf.int32
class MultiCategoricalPdType(PdType):
def __init__(self, nvec):
self.ncats = nvec
def pdclass(self):
return MultiCategoricalPd
def pdfromflat(self, flat):
return MultiCategoricalPd(self.ncats, flat)
def param_shape(self):
return [sum(self.ncats)]
def sample_shape(self):
return [len(self.ncats)]
def sample_dtype(self):
return tf.int32
class DiagGaussianPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return DiagGaussianPd
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
mean = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
logstd = tf.get_variable(name='pi/logstd', shape=[1, self.size], initializer=tf.zeros_initializer())
pdparam = tf.concat([mean, mean * 0.0 + logstd], axis=1)
return self.pdfromflat(pdparam), mean
def param_shape(self):
return [2*self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.float32
class BernoulliPdType(PdType):
def __init__(self, size):
self.size = size
def pdclass(self):
return BernoulliPd
def param_shape(self):
return [self.size]
def sample_shape(self):
return [self.size]
def sample_dtype(self):
return tf.int32
def pdfromlatent(self, latent_vector, init_scale=1.0, init_bias=0.0):
pdparam = fc(latent_vector, 'pi', self.size, init_scale=init_scale, init_bias=init_bias)
return self.pdfromflat(pdparam), pdparam
# WRONG SECOND DERIVATIVES
# class CategoricalPd(Pd):
# def __init__(self, logits):
# self.logits = logits
# self.ps = tf.nn.softmax(logits)
# @classmethod
# def fromflat(cls, flat):
# return cls(flat)
# def flatparam(self):
# return self.logits
# def mode(self):
# return U.argmax(self.logits, axis=-1)
# def logp(self, x):
# return -tf.nn.sparse_softmax_cross_entropy_with_logits(self.logits, x)
# def kl(self, other):
# return tf.nn.softmax_cross_entropy_with_logits(other.logits, self.ps) \
# - tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def entropy(self):
# return tf.nn.softmax_cross_entropy_with_logits(self.logits, self.ps)
# def sample(self):
# u = tf.random_uniform(tf.shape(self.logits))
# return U.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
class CategoricalPd(Pd):
def __init__(self, logits):
self.logits = logits
def flatparam(self):
return self.logits
def mode(self):
return tf.argmax(self.logits, axis=-1)
@property
def mean(self):
return tf.nn.softmax(self.logits)
def neglogp(self, x):
# return tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=x)
# Note: we can't use sparse_softmax_cross_entropy_with_logits because
# the implementation does not allow second-order derivatives...
if x.dtype in {tf.uint8, tf.int32, tf.int64}:
# one-hot encoding
x_shape_list = x.shape.as_list()
logits_shape_list = self.logits.get_shape().as_list()[:-1]
for xs, ls in zip(x_shape_list, logits_shape_list):
if xs is not None and ls is not None:
assert xs == ls, 'shape mismatch: {} in x vs {} in logits'.format(xs, ls)
x = tf.one_hot(x, self.logits.get_shape().as_list()[-1])
else:
# already encoded
assert x.shape.as_list() == self.logits.shape.as_list()
return tf.nn.softmax_cross_entropy_with_logits_v2(
logits=self.logits,
labels=x)
def kl(self, other):
a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
a1 = other.logits - tf.reduce_max(other.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
ea1 = tf.exp(a1)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
z1 = tf.reduce_sum(ea1, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), axis=-1)
def entropy(self):
a0 = self.logits - tf.reduce_max(self.logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
return tf.reduce_sum(p0 * (tf.log(z0) - a0), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.logits), dtype=self.logits.dtype)
return tf.argmax(self.logits - tf.log(-tf.log(u)), axis=-1)
@classmethod
def fromflat(cls, flat):
return cls(flat)
class MultiCategoricalPd(Pd):
def __init__(self, nvec, flat):
self.flat = flat
self.categoricals = list(map(CategoricalPd, tf.split(flat, nvec, axis=-1)))
def flatparam(self):
return self.flat
def mode(self):
return tf.cast(tf.stack([p.mode() for p in self.categoricals], axis=-1), tf.int32)
def neglogp(self, x):
return tf.add_n([p.neglogp(px) for p, px in zip(self.categoricals, tf.unstack(x, axis=-1))])
def kl(self, other):
return tf.add_n([p.kl(q) for p, q in zip(self.categoricals, other.categoricals)])
def entropy(self):
return tf.add_n([p.entropy() for p in self.categoricals])
def sample(self):
return tf.cast(tf.stack([p.sample() for p in self.categoricals], axis=-1), tf.int32)
@classmethod
def fromflat(cls, flat):
raise NotImplementedError
class DiagGaussianPd(Pd):
def __init__(self, flat):
self.flat = flat
mean, logstd = tf.split(axis=len(flat.shape)-1, num_or_size_splits=2, value=flat)
self.mean = mean
self.logstd = logstd
self.std = tf.exp(logstd)
def flatparam(self):
return self.flat
def mode(self):
return self.mean
def neglogp(self, x):
return 0.5 * tf.reduce_sum(tf.square((x - self.mean) / self.std), axis=-1) \
+ 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[-1]) \
+ tf.reduce_sum(self.logstd, axis=-1)
def kl(self, other):
assert isinstance(other, DiagGaussianPd)
return tf.reduce_sum(other.logstd - self.logstd + (tf.square(self.std) + tf.square(self.mean - other.mean)) / (2.0 * tf.square(other.std)) - 0.5, axis=-1)
def entropy(self):
return tf.reduce_sum(self.logstd + .5 * np.log(2.0 * np.pi * np.e), axis=-1)
def sample(self):
return self.mean + self.std * tf.random_normal(tf.shape(self.mean))
@classmethod
def fromflat(cls, flat):
return cls(flat)
class BernoulliPd(Pd):
def __init__(self, logits):
self.logits = logits
self.ps = tf.sigmoid(logits)
def flatparam(self):
return self.logits
@property
def mean(self):
return self.ps
def mode(self):
return tf.round(self.ps)
def neglogp(self, x):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=tf.to_float(x)), axis=-1)
def kl(self, other):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=other.logits, labels=self.ps), axis=-1) - tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def entropy(self):
return tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.ps), axis=-1)
def sample(self):
u = tf.random_uniform(tf.shape(self.ps))
return tf.to_float(math_ops.less(u, self.ps))
@classmethod
def fromflat(cls, flat):
return cls(flat)
def make_pdtype(ac_space):
from gym import spaces
if isinstance(ac_space, spaces.Box):
assert len(ac_space.shape) == 1
return DiagGaussianPdType(ac_space.shape[0])
elif isinstance(ac_space, spaces.Discrete):
return CategoricalPdType(ac_space.n)
elif isinstance(ac_space, spaces.MultiDiscrete):
return MultiCategoricalPdType(ac_space.nvec)
elif isinstance(ac_space, spaces.MultiBinary):
return BernoulliPdType(ac_space.n)
else:
raise NotImplementedError
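# Illustrative mapping from gym action spaces to distribution types (the
# spaces shown are hypothetical examples):
#   make_pdtype(gym.spaces.Discrete(6))               # -> CategoricalPdType(6)
#   make_pdtype(gym.spaces.Box(-1., 1., shape=(3,)))  # -> DiagGaussianPdType(3)
#   make_pdtype(gym.spaces.MultiBinary(4))            # -> BernoulliPdType(4)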
def shape_el(v, i):
maybe = v.get_shape()[i]
if maybe is not None:
return maybe
else:
return tf.shape(v)[i]
@U.in_session
def test_probtypes():
np.random.seed(0)
pdparam_diag_gauss = np.array([-.2, .3, .4, -.5, .1, -.5, .1, 0.8])
diag_gauss = DiagGaussianPdType(pdparam_diag_gauss.size // 2) #pylint: disable=E1101
validate_probtype(diag_gauss, pdparam_diag_gauss)
pdparam_categorical = np.array([-.2, .3, .5])
categorical = CategoricalPdType(pdparam_categorical.size) #pylint: disable=E1101
validate_probtype(categorical, pdparam_categorical)
nvec = [1,2,3]
pdparam_multicategorical = np.array([-.2, .3, .5, .1, 1, -.1])
multicategorical = MultiCategoricalPdType(nvec) #pylint: disable=E1101
validate_probtype(multicategorical, pdparam_multicategorical)
pdparam_bernoulli = np.array([-.2, .3, .5])
bernoulli = BernoulliPdType(pdparam_bernoulli.size) #pylint: disable=E1101
validate_probtype(bernoulli, pdparam_bernoulli)
def validate_probtype(probtype, pdparam):
N = 100000
# Check to see if mean negative log likelihood == differential entropy
Mval = np.repeat(pdparam[None, :], N, axis=0)
M = probtype.param_placeholder([N])
X = probtype.sample_placeholder([N])
pd = probtype.pdfromflat(M)
calcloglik = U.function([X, M], pd.logp(X))
calcent = U.function([M], pd.entropy())
Xval = tf.get_default_session().run(pd.sample(), feed_dict={M:Mval})
logliks = calcloglik(Xval, Mval)
entval_ll = - logliks.mean() #pylint: disable=E1101
entval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
entval = calcent(Mval).mean() #pylint: disable=E1101
assert np.abs(entval - entval_ll) < 3 * entval_ll_stderr # within 3 sigmas
# Check to see if kldiv[p,q] = - ent[p] - E_p[log q]
M2 = probtype.param_placeholder([N])
pd2 = probtype.pdfromflat(M2)
q = pdparam + np.random.randn(pdparam.size) * 0.1
Mval2 = np.repeat(q[None, :], N, axis=0)
calckl = U.function([M, M2], pd.kl(pd2))
klval = calckl(Mval, Mval2).mean() #pylint: disable=E1101
logliks = calcloglik(Xval, Mval2)
klval_ll = - entval - logliks.mean() #pylint: disable=E1101
klval_ll_stderr = logliks.std() / np.sqrt(N) #pylint: disable=E1101
assert np.abs(klval - klval_ll) < 3 * klval_ll_stderr # within 3 sigmas
    print('ok on', probtype, pdparam)

# ===== end of file: ppo/baselines/common/distributions.py (package: ytopt) =====
import numpy as np
import scipy.signal
def discount(x, gamma):
"""
computes discounted sums along 0th dimension of x.
inputs
------
x: ndarray
gamma: float
outputs
-------
y: ndarray with same shape as x, satisfying
y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
where k = len(x) - t - 1
"""
assert x.ndim >= 1
return scipy.signal.lfilter([1],[1,-gamma],x[::-1], axis=0)[::-1]
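# Worked example (added, illustrative): with gamma = 0.5,
#   discount(np.array([1., 1., 1.]), 0.5) -> [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0]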
def explained_variance(ypred,y):
"""
Computes fraction of variance that ypred explains about y.
Returns 1 - Var[y-ypred] / Var[y]
interpretation:
ev=0 => might as well have predicted zero
ev=1 => perfect prediction
ev<0 => worse than just predicting zero
"""
assert y.ndim == 1 and ypred.ndim == 1
vary = np.var(y)
return np.nan if vary==0 else 1 - np.var(y-ypred)/vary
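# Added note (illustrative): explained_variance(y, y) == 1 (perfect prediction), a constant
# zero prediction gives 1 - Var[y]/Var[y] == 0, and predictions noisier than y push the
# value below zero.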
def explained_variance_2d(ypred, y):
assert y.ndim == 2 and ypred.ndim == 2
vary = np.var(y, axis=0)
    out = 1 - np.var(y-ypred, axis=0)/vary  # per-column explained variance
out[vary < 1e-10] = 0
return out
def ncc(ypred, y):
return np.corrcoef(ypred, y)[1,0]
def flatten_arrays(arrs):
return np.concatenate([arr.flat for arr in arrs])
def unflatten_vector(vec, shapes):
i=0
arrs = []
for shape in shapes:
size = np.prod(shape)
arr = vec[i:i+size].reshape(shape)
arrs.append(arr)
i += size
return arrs
def discount_with_boundaries(X, New, gamma):
"""
X: 2d array of floats, time x features
New: 2d array of bools, indicating when a new episode has started
"""
Y = np.zeros_like(X)
T = X.shape[0]
Y[T-1] = X[T-1]
for t in range(T-2, -1, -1):
Y[t] = X[t] + gamma * Y[t+1] * (1 - New[t+1])
return Y
def test_discount_with_boundaries():
gamma=0.9
x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
starts = [1.0, 0.0, 0.0, 1.0]
y = discount_with_boundaries(x, starts, gamma)
assert np.allclose(y, [
1 + gamma * 2 + gamma**2 * 3,
2 + gamma * 3,
3,
4
    ])

# ===== end of file: ppo/baselines/common/math_util.py (package: ytopt) =====
from mpi4py import MPI
import ppo.baselines.common.tf_util as U
import tensorflow as tf
import numpy as np
class MpiAdam(object):
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(U.numel(v) for v in var_list)
self.m = np.zeros(size, 'float32')
self.v = np.zeros(size, 'float32')
self.t = 0
self.setfromflat = U.SetFromFlat(var_list)
self.getflat = U.GetFlat(var_list)
self.comm = MPI.COMM_WORLD if comm is None else comm
def update(self, localg, stepsize):
if self.t % 100 == 0:
self.check_synced()
localg = localg.astype('float32')
globalg = np.zeros_like(localg)
self.comm.Allreduce(localg, globalg, op=MPI.SUM)
if self.scale_grad_by_procs:
globalg /= self.comm.Get_size()
self.t += 1
a = stepsize * np.sqrt(1 - self.beta2**self.t)/(1 - self.beta1**self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
step = (- a) * self.m / (np.sqrt(self.v) + self.epsilon)
self.setfromflat(self.getflat() + step)
def sync(self):
theta = self.getflat()
self.comm.Bcast(theta, root=0)
self.setfromflat(theta)
def check_synced(self):
if self.comm.Get_rank() == 0: # this is root
theta = self.getflat()
self.comm.Bcast(theta, root=0)
else:
thetalocal = self.getflat()
thetaroot = np.empty_like(thetalocal)
self.comm.Bcast(thetaroot, root=0)
assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)
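# Illustrative usage sketch (added; `compute_flat_gradient` is a hypothetical helper
# returning a flat float32 gradient for `var_list`). Typical pattern in the baselines
# trainers:
#
#   adam = MpiAdam(var_list, epsilon=1e-5)
#   adam.sync()                              # broadcast rank-0 parameters to all workers
#   for _ in range(num_updates):
#       grad = compute_flat_gradient()
#       adam.update(grad, stepsize=3e-4)     # gradients are averaged across MPI workers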
@U.in_session
def test_MpiAdam():
np.random.seed(0)
tf.set_random_seed(0)
a = tf.Variable(np.random.randn(3).astype('float32'))
b = tf.Variable(np.random.randn(2,5).astype('float32'))
loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))
stepsize = 1e-2
update_op = tf.train.AdamOptimizer(stepsize).minimize(loss)
do_update = U.function([], loss, updates=[update_op])
tf.get_default_session().run(tf.global_variables_initializer())
for i in range(10):
print(i,do_update())
tf.set_random_seed(0)
tf.get_default_session().run(tf.global_variables_initializer())
var_list = [a,b]
    # evaluate the loss and flat gradient only; running update_op here as well would apply
    # TensorFlow's Adam step and corrupt the comparison against MpiAdam
    lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)])
adam = MpiAdam(var_list)
for i in range(10):
l,g = lossandgrad()
adam.update(g, stepsize)
        print(i,l)

# ===== end of file: ppo/baselines/common/mpi_adam.py (package: ytopt) =====
from __future__ import print_function
from contextlib import contextmanager
import numpy as np
import time
import shlex
import subprocess
# ================================================================
# Misc
# ================================================================
def fmt_row(width, row, header=False):
out = " | ".join(fmt_item(x, width) for x in row)
if header: out = out + "\n" + "-"*len(out)
return out
def fmt_item(x, l):
if isinstance(x, np.ndarray):
assert x.ndim==0
x = x.item()
if isinstance(x, (float, np.float32, np.float64)):
v = abs(x)
if (v < 1e-4 or v > 1e+4) and v > 0:
rep = "%7.2e" % x
else:
rep = "%7.5f" % x
else: rep = str(x)
return " "*(l - len(rep)) + rep
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color='green', bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
def print_cmd(cmd, dry=False):
if isinstance(cmd, str): # for shell=True
pass
else:
cmd = ' '.join(shlex.quote(arg) for arg in cmd)
print(colorize(('CMD: ' if not dry else 'DRY: ') + cmd))
def get_git_commit(cwd=None):
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], cwd=cwd).decode('utf8')
def get_git_commit_message(cwd=None):
return subprocess.check_output(['git', 'show', '-s', '--format=%B', 'HEAD'], cwd=cwd).decode('utf8')
def ccap(cmd, dry=False, env=None, **kwargs):
print_cmd(cmd, dry)
if not dry:
subprocess.check_call(cmd, env=env, **kwargs)
MESSAGE_DEPTH = 0
@contextmanager
def timed(msg):
global MESSAGE_DEPTH #pylint: disable=W0603
print(colorize('\t'*MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
tstart = time.time()
MESSAGE_DEPTH += 1
yield
MESSAGE_DEPTH -= 1
    print(colorize('\t'*MESSAGE_DEPTH + "done in %.3f seconds"%(time.time() - tstart), color='magenta'))

# ===== end of file: ppo/baselines/common/console_util.py (package: ytopt) =====
from collections import defaultdict
from mpi4py import MPI
import os, numpy as np
import platform
import shutil
import subprocess
def sync_from_root(sess, variables, comm=None):
"""
Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's
"""
if comm is None: comm = MPI.COMM_WORLD
rank = comm.Get_rank()
for var in variables:
if rank == 0:
comm.Bcast(sess.run(var))
else:
import tensorflow as tf
returned_var = np.empty(var.shape, dtype='float32')
comm.Bcast(returned_var)
sess.run(tf.assign(var, returned_var))
def gpu_count():
"""
Count the GPUs on this machine.
"""
if shutil.which('nvidia-smi') is None:
return 0
output = subprocess.check_output(['nvidia-smi', '--query-gpu=gpu_name', '--format=csv'])
return max(0, len(output.split(b'\n')) - 2)
def setup_mpi_gpus():
"""
Set CUDA_VISIBLE_DEVICES using MPI.
"""
num_gpus = gpu_count()
if num_gpus == 0:
return
local_rank, _ = get_local_rank_size(MPI.COMM_WORLD)
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
def get_local_rank_size(comm):
"""
Returns the rank of each process on its machine
The processes on a given machine will be assigned ranks
0, 1, 2, ..., N-1,
where N is the number of processes on this machine.
Useful if you want to assign one gpu per machine
"""
this_node = platform.node()
ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
node2rankssofar = defaultdict(int)
local_rank = None
for (rank, node) in ranks_nodes:
if rank == comm.Get_rank():
local_rank = node2rankssofar[node]
node2rankssofar[node] += 1
assert local_rank is not None
return local_rank, node2rankssofar[this_node]
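# Added note (illustrative): with 4 MPI processes spread over 2 machines (2 per machine),
# each process gets local_rank in {0, 1} and local size 2, independent of its global rank.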
def share_file(comm, path):
"""
Copies the file from rank 0 to all other ranks
Puts it in the same place on all machines
"""
localrank, _ = get_local_rank_size(comm)
if comm.Get_rank() == 0:
with open(path, 'rb') as fh:
data = fh.read()
comm.bcast(data)
else:
data = comm.bcast(None)
if localrank == 0:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, 'wb') as fh:
fh.write(data)
comm.Barrier()
def dict_gather(comm, d, op='mean', assert_all_have_data=True):
if comm is None: return d
alldicts = comm.allgather(d)
size = comm.size
k2li = defaultdict(list)
for d in alldicts:
for (k,v) in d.items():
k2li[k].append(v)
result = {}
for (k,li) in k2li.items():
if assert_all_have_data:
assert len(li)==size, "only %i out of %i MPI workers have sent '%s'" % (len(li), size, k)
if op=='mean':
result[k] = np.mean(li, axis=0)
elif op=='sum':
result[k] = np.sum(li, axis=0)
else:
assert 0, op
    return result

# ===== end of file: ppo/baselines/common/mpi_util.py (package: ytopt) =====
import numpy as np
import os
os.environ.setdefault('PATH', '')
from collections import deque
import gym
from gym import spaces
import cv2
cv2.ocl.setUseOpenCL(False)
class NoopResetEnv(gym.Wrapper):
def __init__(self, env, noop_max=30):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
"""
gym.Wrapper.__init__(self, env)
self.noop_max = noop_max
self.override_num_noops = None
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == 'NOOP'
def reset(self, **kwargs):
""" Do no-op action for a number of steps in [1, noop_max]."""
self.env.reset(**kwargs)
if self.override_num_noops is not None:
noops = self.override_num_noops
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101
assert noops > 0
obs = None
for _ in range(noops):
obs, _, done, _ = self.env.step(self.noop_action)
if done:
obs = self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class FireResetEnv(gym.Wrapper):
def __init__(self, env):
"""Take action on reset for environments that are fixed until firing."""
gym.Wrapper.__init__(self, env)
assert env.unwrapped.get_action_meanings()[1] == 'FIRE'
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(1)
if done:
self.env.reset(**kwargs)
obs, _, done, _ = self.env.step(2)
if done:
self.env.reset(**kwargs)
return obs
def step(self, ac):
return self.env.step(ac)
class EpisodicLifeEnv(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
            # For Qbert we sometimes stay in the lives == 0 condition for a few frames,
            # so it's important to keep lives > 0 so that we only reset once
            # the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
class MaxAndSkipEnv(gym.Wrapper):
def __init__(self, env, skip=4):
"""Return only every `skip`-th frame"""
gym.Wrapper.__init__(self, env)
# most recent raw observations (for max pooling across time steps)
self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)
self._skip = skip
def step(self, action):
"""Repeat action, sum reward, and max over last observations."""
total_reward = 0.0
done = None
for i in range(self._skip):
obs, reward, done, info = self.env.step(action)
if i == self._skip - 2: self._obs_buffer[0] = obs
if i == self._skip - 1: self._obs_buffer[1] = obs
total_reward += reward
if done:
break
# Note that the observation on the done=True frame
# doesn't matter
max_frame = self._obs_buffer.max(axis=0)
return max_frame, total_reward, done, info
def reset(self, **kwargs):
return self.env.reset(**kwargs)
class ClipRewardEnv(gym.RewardWrapper):
def __init__(self, env):
gym.RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class WarpFrame(gym.ObservationWrapper):
def __init__(self, env):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
gym.ObservationWrapper.__init__(self, env)
self.width = 84
self.height = 84
self.observation_space = spaces.Box(low=0, high=255,
shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient.
See Also
--------
baselines.common.atari_wrappers.LazyFrames
"""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255, shape=(shp[0], shp[1], shp[2] * k), dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class ScaledFloatFrame(gym.ObservationWrapper):
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)
def observation(self, observation):
# careful! This undoes the memory optimization, use
# with smaller replay buffers only.
return np.array(observation).astype(np.float32) / 255.0
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model.
You'd not believe how complex the previous solution was."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=2)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
def make_atari(env_id):
env = gym.make(env_id)
assert 'NoFrameskip' in env.spec.id
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
return env
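# Illustrative usage (added): the standard DeepMind-style preprocessing pipeline, using
# wrap_deepmind defined just below.
#
#   env = make_atari('BreakoutNoFrameskip-v4')
#   env = wrap_deepmind(env, frame_stack=True)
#   obs = env.reset()      # LazyFrames of shape (84, 84, 4); use np.array(obs) before
#                          # feeding it to a network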
def wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):
"""Configure environment for DeepMind-style Atari.
"""
if episode_life:
env = EpisodicLifeEnv(env)
if 'FIRE' in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, 4)
    return env

# ===== end of file: ppo/baselines/common/atari_wrappers.py (package: ytopt) =====
from .atari_wrappers import *
import numpy as np
import gym
class TimeLimit(gym.Wrapper):
def __init__(self, env, max_episode_steps=None):
super(TimeLimit, self).__init__(env)
self._max_episode_steps = max_episode_steps
self._elapsed_steps = 0
def step(self, ac):
observation, reward, done, info = self.env.step(ac)
self._elapsed_steps += 1
if self._elapsed_steps >= self._max_episode_steps:
done = True
info['TimeLimit.truncated'] = True
return observation, reward, done, info
def reset(self, **kwargs):
self._elapsed_steps = 0
return self.env.reset(**kwargs)
class StochasticFrameSkip(gym.Wrapper):
def __init__(self, env, n, stickprob):
gym.Wrapper.__init__(self, env)
self.n = n
self.stickprob = stickprob
self.curac = None
self.rng = np.random.RandomState()
self.supports_want_render = hasattr(env, "supports_want_render")
def reset(self, **kwargs):
self.curac = None
return self.env.reset(**kwargs)
def step(self, ac):
done = False
totrew = 0
for i in range(self.n):
# First step after reset, use action
if self.curac is None:
self.curac = ac
# First substep, delay with probability=stickprob
elif i==0:
if self.rng.rand() > self.stickprob:
self.curac = ac
# Second substep, new action definitely kicks in
elif i==1:
self.curac = ac
if self.supports_want_render and i<self.n-1:
ob, rew, done, info = self.env.step(self.curac, want_render=False)
else:
ob, rew, done, info = self.env.step(self.curac)
totrew += rew
if done: break
return ob, totrew, done, info
def seed(self, s):
self.rng.seed(s)
class PartialFrameStack(gym.Wrapper):
def __init__(self, env, k, channel=1):
"""
Stack one channel (channel keyword) from previous frames
"""
gym.Wrapper.__init__(self, env)
shp = env.observation_space.shape
self.channel = channel
self.observation_space = gym.spaces.Box(low=0, high=255,
shape=(shp[0], shp[1], shp[2] + k - 1),
dtype=env.observation_space.dtype)
        self.k = k
        self.frames = deque([], maxlen=k)
def reset(self):
ob = self.env.reset()
assert ob.shape[2] > self.channel
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, ac):
ob, reward, done, info = self.env.step(ac)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return np.concatenate([frame if i==self.k-1 else frame[:,:,self.channel:self.channel+1]
for (i, frame) in enumerate(self.frames)], axis=2)
class Downsample(gym.ObservationWrapper):
def __init__(self, env, ratio):
"""
Downsample images by a factor of ratio
"""
gym.ObservationWrapper.__init__(self, env)
(oldh, oldw, oldc) = env.observation_space.shape
newshape = (oldh//ratio, oldw//ratio, oldc)
self.observation_space = spaces.Box(low=0, high=255,
shape=newshape, dtype=np.uint8)
def observation(self, frame):
height, width, _ = self.observation_space.shape
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
if frame.ndim == 2:
frame = frame[:,:,None]
return frame
class Rgb2gray(gym.ObservationWrapper):
def __init__(self, env):
"""
        Convert RGB observations to single-channel grayscale
"""
gym.ObservationWrapper.__init__(self, env)
(oldh, oldw, _oldc) = env.observation_space.shape
self.observation_space = spaces.Box(low=0, high=255,
shape=(oldh, oldw, 1), dtype=np.uint8)
def observation(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return frame[:,:,None]
class MovieRecord(gym.Wrapper):
def __init__(self, env, savedir, k):
gym.Wrapper.__init__(self, env)
self.savedir = savedir
self.k = k
self.epcount = 0
def reset(self):
if self.epcount % self.k == 0:
print('saving movie this episode', self.savedir)
self.env.unwrapped.movie_path = self.savedir
else:
print('not saving this episode')
self.env.unwrapped.movie_path = None
self.env.unwrapped.movie = None
self.epcount += 1
return self.env.reset()
class AppendTimeout(gym.Wrapper):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
self.action_space = env.action_space
self.timeout_space = gym.spaces.Box(low=np.array([0.0]), high=np.array([1.0]), dtype=np.float32)
self.original_os = env.observation_space
if isinstance(self.original_os, gym.spaces.Dict):
import copy
ordered_dict = copy.deepcopy(self.original_os.spaces)
ordered_dict['value_estimation_timeout'] = self.timeout_space
self.observation_space = gym.spaces.Dict(ordered_dict)
self.dict_mode = True
else:
self.observation_space = gym.spaces.Dict({
'original': self.original_os,
'value_estimation_timeout': self.timeout_space
})
self.dict_mode = False
self.ac_count = None
        # Walk down the wrapper stack until we find the TimeLimit wrapper that defines _max_episode_steps
        while not hasattr(env, "_max_episode_steps"):
            env = env.env
self.timeout = env._max_episode_steps
def step(self, ac):
self.ac_count += 1
ob, rew, done, info = self.env.step(ac)
return self._process(ob), rew, done, info
def reset(self):
self.ac_count = 0
return self._process(self.env.reset())
    def _process(self, ob):
        fracmissing = 1 - self.ac_count / self.timeout
        if self.dict_mode:
            ob['value_estimation_timeout'] = fracmissing
            return ob
        else:
            return { 'original': ob, 'value_estimation_timeout': fracmissing }
class StartDoingRandomActionsWrapper(gym.Wrapper):
"""
Warning: can eat info dicts, not good if you depend on them
"""
def __init__(self, env, max_random_steps, on_startup=True, every_episode=False):
gym.Wrapper.__init__(self, env)
self.on_startup = on_startup
self.every_episode = every_episode
self.random_steps = max_random_steps
self.last_obs = None
if on_startup:
self.some_random_steps()
def some_random_steps(self):
self.last_obs = self.env.reset()
n = np.random.randint(self.random_steps)
#print("running for random %i frames" % n)
for _ in range(n):
self.last_obs, _, done, _ = self.env.step(self.env.action_space.sample())
if done: self.last_obs = self.env.reset()
def reset(self):
return self.last_obs
def step(self, a):
self.last_obs, rew, done, info = self.env.step(a)
if done:
self.last_obs = self.env.reset()
if self.every_episode:
self.some_random_steps()
return self.last_obs, rew, done, info
def make_retro(*, game, state, max_episode_steps, **kwargs):
import retro
env = retro.make(game, state, **kwargs)
env = StochasticFrameSkip(env, n=4, stickprob=0.25)
if max_episode_steps is not None:
env = TimeLimit(env, max_episode_steps=max_episode_steps)
return env
def wrap_deepmind_retro(env, scale=True, frame_stack=4):
"""
Configure environment for retro games, using config similar to DeepMind-style Atari in wrap_deepmind
"""
env = WarpFrame(env)
env = ClipRewardEnv(env)
env = FrameStack(env, frame_stack)
if scale:
env = ScaledFloatFrame(env)
return env
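# Illustrative usage (added; requires gym-retro, and the game/state names below are just
# placeholders for whatever ROMs are installed locally):
#
#   env = make_retro(game='SonicTheHedgehog-Genesis', state='GreenHillZone.Act1',
#                    max_episode_steps=4500)
#   env = wrap_deepmind_retro(env)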
class SonicDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the Sonic game.
"""
def __init__(self, env):
super(SonicDiscretizer, self).__init__(env)
buttons = ["B", "A", "MODE", "START", "UP", "DOWN", "LEFT", "RIGHT", "C", "Y", "X", "Z"]
actions = [['LEFT'], ['RIGHT'], ['LEFT', 'DOWN'], ['RIGHT', 'DOWN'], ['DOWN'],
['DOWN', 'B'], ['B']]
self._actions = []
for action in actions:
arr = np.array([False] * 12)
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
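# Added note (illustrative): the wrapper exposes Discrete(7); e.g. action index 1 ('RIGHT')
# is translated into a 12-element boolean button array with only the RIGHT button pressed.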
class RewardScaler(gym.RewardWrapper):
"""
Bring rewards to a reasonable scale for PPO.
    This is incredibly important and affects performance
    drastically.
"""
def __init__(self, env, scale=0.01):
super(RewardScaler, self).__init__(env)
self.scale = scale
def reward(self, reward):
return reward * self.scale
class AllowBacktracking(gym.Wrapper):
"""
Use deltas in max(X) as the reward, rather than deltas
in X. This way, agents are not discouraged too heavily
from exploring backwards if there is no way to advance
head-on in the level.
"""
def __init__(self, env):
super(AllowBacktracking, self).__init__(env)
self._cur_x = 0
self._max_x = 0
def reset(self, **kwargs): # pylint: disable=E0202
self._cur_x = 0
self._max_x = 0
return self.env.reset(**kwargs)
def step(self, action): # pylint: disable=E0202
obs, rew, done, info = self.env.step(action)
self._cur_x += rew
rew = max(0, self._cur_x - self._max_x)
self._max_x = max(self._max_x, self._cur_x)
        return obs, rew, done, info

# ===== end of file: ppo/baselines/common/retro_wrappers.py (package: ytopt) =====