mirror of https://github.com/streamlink/streamlink

chore: add "B" rules to ruff config

parent 90ccc1039c · commit 5e6f03c3cd
@@ -217,7 +217,7 @@ class PluginFinder:
             return pluginmetadata

         except Exception as err:
-            raise ExtensionError(f"Error while parsing plugin file {pluginfile.name}", err)
+            raise ExtensionError(f"Error while parsing plugin file {pluginfile.name}", err) from err


 class PluginsDirective(Directive):
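The bulk of this commit addresses bugbear's B904 (raise-without-from-inside-except): re-raising a different exception inside an `except` block should chain the original via `raise ... from err`, so the causing traceback stays attached. A minimal sketch of the pattern being enforced (names here are illustrative, not from the diff):

    def load_config(path):
        try:
            with open(path) as fh:
                return fh.read()
        except OSError as err:
            # B904: chain the original exception so its traceback is preserved
            raise RuntimeError(f"Could not load config: {path}") from err

The alternative, `raise ... from None`, deliberately suppresses the chained context; both spellings satisfy the rule.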
@@ -7,7 +7,7 @@ from docutils import nodes
 from sphinx.util.nodes import split_explicit_title


-def releaseref_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+def releaseref_role(name, rawtext, text, lineno, inliner, options=None, content=None):
     config = inliner.document.settings.env.config
     text = text.replace("|version|", config.version)
     text = text.replace("|release|", config.release)
@@ -16,7 +16,7 @@ def releaseref_role(name, rawtext, text, lineno, inliner, options={}, content=[]
     if not has_explicit_title:
         title = os.path.basename(target)

-    node = nodes.reference(rawtext, title, refuri=target, **options)
+    node = nodes.reference(rawtext, title, refuri=target, **(options or {}))

     return [node], []
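This pair of changes is bugbear's B006 (mutable-argument-default): default values are evaluated once, at function definition time, so a `{}` or `[]` default is shared across every call. A hedged illustration of the failure mode (standalone example, not from the diff):

    def append_item(item, bucket=[]):   # B006: one shared list for every call
        bucket.append(item)
        return bucket

    append_item(1)  # [1]
    append_item(2)  # [1, 2] - the "empty" default remembered the first call

    def append_item_fixed(item, bucket=None):
        bucket = [] if bucket is None else bucket
        bucket.append(item)
        return bucket

The `None` sentinel plus `options or {}` at the use site is the conventional fix applied above.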
@@ -81,6 +81,8 @@ select = [
     "I",
     # flake8-builtins
     "A",
+    # flake8-bugbear
+    "B",
     # flake8-commas
     "COM",
     # flake8-comprehensions
@@ -100,6 +102,7 @@ select = [
 ]
 extend-ignore = [
     "A003",  # builtin-attribute-shadowing
+    "B028",  # no-explicit-stacklevel
     "C408",  # unnecessary-collection-call
     "ISC003",  # explicit-string-concatenation
     "PLC1901",  # compare-to-empty-string
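Selecting `"B"` enables the whole flake8-bugbear rule family, while `extend-ignore` opts back out of B028 (no-explicit-stacklevel), which would otherwise require every `warnings.warn()` call to pass an explicit `stacklevel`. A quick sketch of what the ignored rule asks for (standalone example, not from this diff):

    import warnings

    def deprecated_api():
        # without stacklevel, the warning points at this line;
        # stacklevel=2 points at the caller, which B028 wants made explicit
        warnings.warn("deprecated_api() is deprecated", DeprecationWarning, stacklevel=2)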
@@ -128,7 +128,7 @@ class Git:
                 ref,
             )
         except subprocess.CalledProcessError as err:
-            raise ValueError(f"Could not get tag from git:\n{err.stderr}")
+            raise ValueError(f"Could not get tag from git:\n{err.stderr}") from err

     @classmethod
     def shortlog(cls, start: str, end: str) -> str:
@@ -141,7 +141,7 @@ class Git:
                 f"{start}...{end}",
             )
         except subprocess.CalledProcessError as err:
-            raise ValueError(f"Could not get shortlog from git:\n{err.stderr}")
+            raise ValueError(f"Could not get shortlog from git:\n{err.stderr}") from err


 class GitHubAPI:
@@ -375,15 +375,15 @@ class Release:
         log.debug(f"Opening release template file: {self.template}")
         try:
             return self._read_file(self.template)
-        except IOError:
-            raise IOError("Missing release template file")
+        except OSError as err:
+            raise OSError("Missing release template file") from err

     def _read_changelog(self):
         log.debug(f"Opening changelog file: {self.changelog}")
         try:
             return self._read_file(self.changelog)
-        except IOError:
-            raise IOError("Missing changelog file")
+        except OSError as err:
+            raise OSError("Missing changelog file") from err

     def _get_changelog(self) -> dict:
         changelog = self._read_changelog()
@@ -444,7 +444,7 @@ class Release:
         return jinjatemplate.render(context)


-def main(args: object):
+def main(args: argparse.Namespace):
     # if no tag was provided, get the current tag from `git describe --tags`
     tag = args.tag or Git.tag()
     if not tag:
@@ -65,8 +65,8 @@ def main(api_key: str, file: Path):
         result: dict = data and data.get("result") or {}
         if result.get("code") != "success":
             raise ValueError(result.get("message") or "Missing version_data in JSON response")
-    except requests.exceptions.RequestException:
-        raise ValueError("Error while querying API or parsing JSON response")
+    except requests.exceptions.RequestException as err:
+        raise ValueError("Error while querying API or parsing JSON response") from err

     version_data: dict = data.get("version_data") or {}
     user_agents = {}
@@ -75,8 +75,8 @@ def main(api_key: str, file: Path):
     for item in seq:
         try:
             obj = obj[item]
-        except KeyError:
-            raise ValueError(f"Invalid key: {item} ({seq})")
+        except KeyError as err:
+            raise ValueError(f"Invalid key: {item} ({seq})") from err

     if type(obj) is not str:
         raise ValueError(f"Invalid result: {obj!r} ({seq})")
@@ -152,11 +152,11 @@ class HTTPSession(Session):
         res = super().request(
             method,
             url,
+            *args,
             headers=headers,
             params=params,
             timeout=timeout,
             proxies=proxies,
-            *args,
             **kwargs,
         )
         if raise_for_status and res.status_code not in acceptable_status:
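Moving `*args` ahead of the keyword arguments is bugbear's B026 (star-arg-unpacking-after-keyword-argument): unpacking positionals after keywords is syntactically legal but confusing, since the unpacked values still bind to positional slots before the keywords. A minimal sketch (the `request` stub here is a hypothetical stand-in, not the session API):

    def request(method, url, *args, timeout=None):
        # hypothetical stand-in for a request method
        return (method, url, args, timeout)

    extra = ("header-blob",)
    # B026 (legal but confusing):
    #   request("GET", "https://example.invalid", timeout=3, *extra)
    # preferred ordering - positional unpacking before keyword arguments:
    request("GET", "https://example.invalid", *extra, timeout=3)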
@@ -73,8 +73,8 @@ def _deprecations():
     __all__ = [k for k in globals().keys() if not k.startswith("_")]
     __all__.extend(deprecations.keys())

-    setattr(sys.modules[__name__], "__getattr__", __getattr__)
-    setattr(sys.modules[__name__], "__all__", __all__)
+    sys.modules[__name__].__getattr__ = __getattr__
+    sys.modules[__name__].__all__ = __all__


 _deprecations()
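These two lines are bugbear's B010 (set-attr-with-constant): `setattr()` with a literal attribute name is just a slower, less greppable spelling of plain attribute assignment. The sibling rule B009 flags `getattr()` with a constant name and no default. A standalone sketch:

    class Box:
        pass

    box = Box()
    setattr(box, "value", 42)       # B010: constant attribute name
    value = getattr(box, "value")   # B009: same problem on the read side

    box.value = 42                  # preferred
    value = box.value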
@@ -151,8 +151,8 @@ def _validate_pattern(schema: Pattern, value):

 @validate.register
 def _validate_allschema(schema: AllSchema, value):
-    for schema in schema.schema:
-        value = validate(schema, value)
+    for subschema in schema.schema:
+        value = validate(subschema, value)

     return value
@@ -173,8 +173,8 @@ def _validate_anyschema(schema: AnySchema, value):
 def _validate_noneorallschema(schema: NoneOrAllSchema, value):
     if value is not None:
         try:
-            for schema in schema.schema:
-                value = validate(schema, value)
+            for subschema in schema.schema:
+                value = validate(subschema, value)
         except ValidationError as err:
             raise ValidationError(err, schema=NoneOrAllSchema) from None
@@ -376,13 +376,13 @@ def validate_union(schema, value):
 @validate_union.register(dict)
 def _validate_union_dict(schema, value):
     new = type(schema)()
-    for key, schema in schema.items():
+    for key, subschema in schema.items():
         is_optional = isinstance(key, OptionalSchema)
         if is_optional:
             key = key.key

         try:
-            new[key] = validate(schema, value)
+            new[key] = validate(subschema, value)
         except ValidationError as err:
             if is_optional:
                 continue
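The renames above fix bugbear's B020 (loop-variable-overrides-iterator): `for schema in schema.schema` rebinds the very name the loop iterates over. It happens to work because the iterable expression is resolved once, up front, but it silently breaks anything after the loop that still expects the original object. A minimal sketch:

    items = {"a": [1], "b": [2]}

    # B020: "items" would be rebound on the first iteration -
    #   for items in items.values(): ...
    # after the loop, "items" would be [2] and the dict would be gone

    for values in items.values():  # a distinct name keeps both objects alive
        print(values)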
@@ -123,9 +123,9 @@ def iterate_streams(streams):
     for name, stream in streams:
         if isinstance(stream, list):
             for sub_stream in stream:
-                yield (name, sub_stream)
+                yield name, sub_stream
         else:
-            yield (name, stream)
+            yield name, stream


 def stream_type_priority(stream_types, stream):
@@ -374,7 +374,7 @@ class Plugin:
     def default_stream_types(cls, streams):
         stream_types = ["hls", "http"]

-        for name, stream in iterate_streams(streams):
+        for _name, stream in iterate_streams(streams):
             stream_type = type(stream).shortname()

             if stream_type not in stream_types:
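Prefixing unused loop variables with an underscore, here and in the many plugin hunks below, satisfies bugbear's B007 (unused-loop-control-variable); the underscore signals that the binding exists only to unpack. A standalone sketch:

    pairs = [("best", 1), ("worst", 2)]

    for _name, weight in pairs:   # B007-clean: _name is deliberately unused
        print(weight)

    for _ in range(3):            # plain _ when nothing at all is needed
        print("tick")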
@@ -428,7 +428,7 @@ class Plugin:
         except NoStreamsError:
             return {}
         except (OSError, ValueError) as err:
-            raise PluginError(err)
+            raise PluginError(err) from err

         if not ostreams:
             return {}
@@ -482,7 +482,7 @@ class Plugin:

         # Create the best/worst synonyms
         def stream_weight_only(s):
-            return (self.stream_weight(s)[0] or (len(streams) == 1 and 1))
+            return self.stream_weight(s)[0] or (len(streams) == 1 and 1)

         stream_names = filter(stream_weight_only, streams.keys())
         sorted_streams = sorted(stream_names, key=stream_weight_only)
@@ -633,7 +633,7 @@ class Plugin:
             try:
                 return user_input_requester.ask(prompt)
             except OSError as err:
-                raise FatalPluginError(f"User input error: {err}")
+                raise FatalPluginError(f"User input error: {err}") from err
         raise FatalPluginError("This plugin requires user input, however it is not supported on this platform")

     def input_ask_password(self, prompt: str) -> str:
@@ -642,7 +642,7 @@ class Plugin:
             try:
                 return user_input_requester.ask_password(prompt)
             except OSError as err:
-                raise FatalPluginError(f"User input error: {err}")
+                raise FatalPluginError(f"User input error: {err}") from err
         raise FatalPluginError("This plugin requires user input, however it is not supported on this platform")

@@ -179,14 +179,14 @@ class AbemaTV(Plugin):
         h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
         h.update(self.SECRETKEY)
         tmp = h.digest()
-        for i in range(time_struct.tm_mon):
+        for _ in range(time_struct.tm_mon):
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
         h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
         h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
         tmp = h.digest()
-        for i in range(time_struct.tm_mday % 5):
+        for _ in range(time_struct.tm_mday % 5):
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
@@ -195,7 +195,7 @@ class AbemaTV(Plugin):
         h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
         tmp = h.digest()

-        for i in range(time_struct.tm_hour % 5):  # utc hour
+        for _ in range(time_struct.tm_hour % 5):  # utc hour
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
@@ -67,7 +67,7 @@ class ArteTV(Plugin):

         self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]

-        for slot, protocol, url in sorted(streams, key=itemgetter(0)):
+        for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
             return HLSStream.parse_variant_playlist(self.session, url)

@@ -134,9 +134,9 @@ class BBCiPlayer(Plugin):
         for connection in media["connection"]:
             urls[connection.get("transferFormat")].add(connection["href"])

-        for stream_type, urls in urls.items():
-            log.debug(f"{len(urls)} {stream_type} streams")
-            for url in list(urls):
+        for stream_type, urlitems in urls.items():
+            log.debug(f"{len(urlitems)} {stream_type} streams")
+            for url in list(urlitems):
                 try:
                     if stream_type == "hls":
                         yield from HLSStream.parse_variant_playlist(self.session, url).items()
@@ -315,7 +315,7 @@ class Crunchyroll(Plugin):
             info = api.get_info(media_id, fields=["media.name", "media.series_name",
                                                   "media.media_type", "media.stream_data"], schema=_media_schema)
         except CrunchyrollAPIError as err:
-            raise PluginError(f"Media lookup error: {err.msg}")
+            raise PluginError(f"Media lookup error: {err.msg}") from err

         if not info:
             return
@@ -390,7 +390,7 @@ class Crunchyroll(Plugin):
             log.info(f"Logged in as '{login_name}'")

         except CrunchyrollAPIError as err:
-            raise PluginError(f"Authentication error: {err.msg}")
+            raise PluginError(f"Authentication error: {err.msg}") from err
         if not api.auth:
             log.warning("No authentication provided, you won't be able to access premium restricted content")

@@ -204,7 +204,7 @@ class Filmon(Plugin):
         self.session.http.get(self.url)

         if vod_id:
-            for quality, url, timeout in self.api.vod(vod_id):
+            for quality, url, _timeout in self.api.vod(vod_id):
                 if url.endswith(".m3u8"):
                     streams = HLSStream.parse_variant_playlist(self.session, url)
                     if streams:
@@ -234,7 +234,7 @@ class Filmon(Plugin):
                 raise PluginError(f"Unable to find channel ID: {channel}")

         try:
-            for quality, url, timeout in self.api.channel(_id):
+            for quality, url, _timeout in self.api.channel(_id):
                 yield quality, FilmOnHLS(self.session, url, self.api, channel=_id, quality=quality)
         except Exception:
             if channel and not channel.isdigit():
@@ -7,7 +7,7 @@ $type live
 import logging
 import re

-from streamlink.exceptions import FatalPluginError, NoStreamsError, PluginError
+from streamlink.exceptions import FatalPluginError, NoStreamsError
 from streamlink.plugin import Plugin, pluginargument, pluginmatcher
 from streamlink.plugin.api import validate
 from streamlink.stream.hls import HLSStream
@@ -158,9 +158,11 @@ class Pixiv(Plugin):
                 # other co-hosts
                 self.set_option("performer", co_hosts[number - 1][0])
             except FatalPluginError:
-                raise PluginError("Selected performer is invalid.")
+                log.error("Selected performer is invalid.")
+                return
             except (IndexError, ValueError, TypeError):
-                raise PluginError("Input is invalid")
+                log.error("Input is invalid")
+                return

         # ignore the owner stream, if a performer is selected
         # or use it when there are no other performers
@@ -11,7 +11,7 @@ import logging
 import re
 from urllib.parse import urljoin

-from streamlink.plugin import Plugin, PluginError, pluginmatcher
+from streamlink.plugin import Plugin, pluginmatcher
 from streamlink.plugin.api import validate
 from streamlink.stream.hls import HLSStream

@@ -63,7 +63,8 @@ class TV4Play(Plugin):
             )
         except Exception as e:
             if "404 Client Error" in str(e):
-                raise PluginError("This Video is not available")
+                log.error("This Video is not available")
+                return
             raise e
         log.debug("Found metadata")
         metadata = self.session.http.json(res, schema=self._meta_schema)
@@ -82,7 +83,8 @@ class TV4Play(Plugin):
             res = self.session.http.get(urljoin(self.api_url, metadata["mediaUri"]))
         except Exception as e:
             if "401 Client Error" in str(e):
-                raise PluginError("This Video is not available in your country")
+                log.error("This Video is not available in your country")
+                return
             raise e

         log.debug("Found stream data")
@@ -638,7 +638,7 @@ class Twitch(Plugin):
                 raise PluginError
             sig, token = data
         except (PluginError, TypeError):
-            raise NoStreamsError
+            raise NoStreamsError  # noqa: B904

         try:
             restricted_bitrates = self.api.parse_token(token)
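Where chaining is deliberately unwanted, the commit suppresses the rule instead of rewriting the control flow: `# noqa: B904` keeps the bare re-raise of the `NoStreamsError` sentinel. A sketch of the two options (exception classes and the failing helper here are hypothetical stand-ins):

    class PluginError(Exception): ...
    class NoStreamsError(Exception): ...

    def fetch_access_token():
        raise PluginError("token request failed")  # hypothetical failure

    try:
        data = fetch_access_token()
    except (PluginError, TypeError):
        # the noqa keeps Python's implicit context chaining and just silences the lint;
        # "raise NoStreamsError from None" would suppress the original context instead
        raise NoStreamsError  # noqa: B904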
@@ -699,7 +699,7 @@ class Twitch(Plugin):
             if "404 Client Error" in err or "Failed to parse playlist" in err:
                 return
             else:
-                raise PluginError(err)
+                raise PluginError(err) from err

         for name in restricted_bitrates:
             if name not in streams:
@@ -1,5 +1,5 @@
 """
-$description Russian live streaming social platform.
+$description Russian live-streaming social platform.
 $url wasd.tv
 $type live
 """
@@ -75,11 +75,11 @@ class WASD(Plugin):
         for stream in json_res["media_container_streams"]:
             log.debug("media_container_status: {0}, media_container_online_status: {1}".format(
                 json_res["media_container_status"], json_res["media_container_online_status"]))
-            for stream in stream["stream_media"]:
-                if stream["media_status"] == "STOPPED":
-                    hls_url = stream["media_meta"]["media_archive_url"]
-                elif stream["media_status"] == "RUNNING":
-                    hls_url = stream["media_meta"]["media_url"]
+            for stream_media in stream["stream_media"]:
+                if stream_media["media_status"] == "STOPPED":
+                    hls_url = stream_media["media_meta"]["media_archive_url"]
+                else:
+                    hls_url = stream_media["media_meta"]["media_url"]

                 yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()

@@ -90,7 +90,7 @@ class WWENetwork(Plugin):
         return self.auth_token

     @property  # type: ignore
-    @lru_cache(maxsize=128)
+    @lru_cache(maxsize=128)  # noqa: B019
     def item_config(self):
         log.debug("Loading page config")
         p = urlparse(self.url)
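B019 (cached-instance-method) fires on `functools.lru_cache` applied to methods: the cache keys include `self`, so every instance stays referenced by the global cache and is never garbage collected. The commit acknowledges the leak with `# noqa: B019` rather than restructuring; the usual structural fixes are caching on a module-level function or, for argument-less properties, `functools.cached_property`. A sketch of the hazard (illustrative classes, not from the diff):

    import functools

    class Session:
        @functools.lru_cache(maxsize=128)   # B019: cache holds "self" alive
        def resolve(self, url):
            return url.lower()

    class Session2:
        @functools.cached_property          # per-instance, no global cache
        def config(self):
            return {"resolved": True}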
@@ -224,7 +224,7 @@ class YouTube(Plugin):
         best_audio_itag = None

         # Extract audio streams from the adaptive format list
-        for url, label, itag, mimeType in adaptive_formats:
+        for url, _label, itag, mimeType in adaptive_formats:
             if url is None:
                 continue

@@ -511,7 +511,7 @@ class Streamlink:
         plugincls = self.plugins[plugin]
         return plugincls.get_option(key)

-    @lru_cache(maxsize=128)
+    @lru_cache(maxsize=128)  # noqa: B019
     def resolve_url(
         self,
         url: str,
@@ -614,7 +614,7 @@ class Streamlink:
         """

         success = False
-        for loader, name, ispkg in pkgutil.iter_modules([path]):
+        for _loader, name, _ispkg in pkgutil.iter_modules([path]):
             # set the full plugin module name
             # use the "streamlink.plugins." prefix even for sideloaded plugins
             module_name = f"streamlink.plugins.{name}"
@@ -951,7 +951,7 @@ class SegmentTimeline(MPDNode):
             if t == 0 and tsegment.t is not None:
                 t = tsegment.t
                 # check the start time from MPD
-            for repeated_i in range(tsegment.r + 1):
+            for _ in range(tsegment.r + 1):
                 yield TimelineSegment(t, tsegment.d)
                 t += tsegment.d

@@ -52,7 +52,7 @@ class MuxedStream(Stream):
         maps = self.options.get("maps", [])
         # only update the maps values if they haven't been set
         update_maps = not maps
-        for i, substream in enumerate(self.substreams):
+        for substream in self.substreams:
             log.debug("Opening {0} substream".format(substream.shortname()))
             if update_maps:
                 maps.append(len(fds))
@@ -330,7 +330,7 @@ class HLSStreamWorker(SegmentedStreamWorker):
         try:
             playlist = self._reload_playlist(res)
         except ValueError as err:
-            raise StreamError(err)
+            raise StreamError(err) from err

         if playlist.is_master:
             raise StreamError(f"Attempted to play a variant playlist, use 'hls://{self.stream.url}' instead")
@@ -656,7 +656,7 @@ class HLSStream(HTTPStream):
         try:
             multivariant = cls._get_variant_playlist(res)
         except ValueError as err:
-            raise OSError(f"Failed to parse playlist: {err}")
+            raise OSError(f"Failed to parse playlist: {err}") from err

         stream_name: Optional[str]
         stream: Union["HLSStream", "MuxedHLSStream"]
@@ -193,7 +193,7 @@ class M3U8Parser:
         if "_TAGS" in self.__class__.__dict__:
             return
         tags = {}
-        setattr(self.__class__, "_TAGS", tags)
+        self.__class__._TAGS = tags
         for name, method in inspect.getmembers(self.__class__, inspect.isfunction):
             if not name.startswith("parse_tag_"):
                 continue
@@ -46,7 +46,7 @@ class Formatter:
     def _format(self, string: str, mapper: Callable[[str], str], defaults: Dict[str, str]) -> str:
         result = []

-        for literal_text, field_name, format_spec, conversion in _stringformatter.parse(string):
+        for literal_text, field_name, format_spec, _conversion in _stringformatter.parse(string):
             if literal_text:
                 result.append(literal_text)

@@ -31,8 +31,8 @@ class Country:
                 c.name,
                 getattr(c, "official_name", c.name),
             )
-        except (LookupError, KeyError):
-            raise LookupError(f"Invalid country code: {country}")
+        except LookupError as err:
+            raise LookupError(f"Invalid country code: {country}") from err

     def __eq__(self, other):
         return (
@@ -76,8 +76,8 @@ class Language:
                 lang.name,
                 getattr(lang, "bibliographic", ""),
             )
-        except (LookupError, KeyError):
-            raise LookupError(f"Invalid language code: {language}")
+        except LookupError as err:
+            raise LookupError(f"Invalid language code: {language}") from err

     def __eq__(self, other):
         return (
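Besides the B904 chaining, collapsing `except (LookupError, KeyError)` to `except LookupError` fixes bugbear's B014 (redundant-exception-types): `KeyError` is already a subclass of `LookupError`, so listing both is dead weight. A standalone sketch:

    try:
        {}["missing"]
    except LookupError:          # already catches KeyError and IndexError
        print("not found")

    assert issubclass(KeyError, LookupError)
    assert issubclass(IndexError, LookupError)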
@@ -15,7 +15,7 @@ def _parse(parser, data, name, exception, schema, *args, **kwargs):
         if len(snippet) > 35:
             snippet = f"{snippet[:35]} ..."

-        raise exception(f"Unable to parse {name}: {err} ({snippet})")
+        raise exception(f"Unable to parse {name}: {err} ({snippet})")  # noqa: B904

     if schema:
         parsed = schema.validate(parsed, name=name, exception=exception)
@@ -313,7 +313,7 @@ def open_stream(stream):
     try:
         stream_fd = stream.open()
     except StreamError as err:
-        raise StreamError(f"Could not open stream: {err}")
+        raise StreamError(f"Could not open stream: {err}") from err

     # Read 8192 bytes before proceeding to check for errors.
     # This is to avoid opening the output unnecessarily.
@@ -322,7 +322,7 @@ def open_stream(stream):
         prebuffer = stream_fd.read(8192)
     except OSError as err:
         stream_fd.close()
-        raise StreamError(f"Failed to read data from stream: {err}")
+        raise StreamError(f"Failed to read data from stream: {err}") from err

     if not prebuffer:
         stream_fd.close()
@@ -412,12 +412,12 @@ def handle_stream(plugin: Plugin, streams: Dict[str, Stream], stream_name: str)

     formatter = get_formatter(plugin)

-    for stream_name in [stream_name] + alt_streams:
-        stream = streams[stream_name]
+    for name in [stream_name] + alt_streams:
+        stream = streams[name]
         stream_type = type(stream).shortname()

         if stream_type in args.player_passthrough and not file_output:
-            log.info(f"Opening stream: {stream_name} ({stream_type})")
+            log.info(f"Opening stream: {name} ({stream_type})")
             success = output_stream_passthrough(stream, formatter)
         elif args.player_external_http:
             return output_stream_http(
@@ -431,7 +431,7 @@ def handle_stream(plugin: Plugin, streams: Dict[str, Stream], stream_name: str)
         elif args.player_continuous_http and not file_output:
             return output_stream_http(plugin, streams, formatter)
         else:
-            log.info(f"Opening stream: {stream_name} ({stream_type})")
+            log.info(f"Opening stream: {name} ({stream_type})")
             success = output_stream(stream, formatter)

         if success:
@@ -502,17 +502,13 @@ def format_valid_streams(plugin: Plugin, streams: Dict[str, Stream]) -> str:
     delimiter = ", "
     validstreams = []

-    for name, stream in sorted(streams.items(),
-                               key=lambda stream: plugin.stream_weight(stream[0])):
+    for name, stream in sorted(streams.items(), key=lambda s: plugin.stream_weight(s[0])):
         if name in STREAM_SYNONYMS:
             continue

-        def synonymfilter(n):
-            return stream is streams[n] and n is not name
+        synonyms = [key for key, value in streams.items() if stream is value and key != name]

-        synonyms = list(filter(synonymfilter, streams.keys()))
-
-        if len(synonyms) > 0:
+        if synonyms:
             joined = delimiter.join(synonyms)
             name = f"{name} ({joined})"

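Replacing the inner `synonymfilter` closure with a list comprehension addresses bugbear's B023 (function-uses-loop-variable): a function defined inside a loop captures `name` and `stream` by reference, not by value, so a deferred callable would see only the last iteration's bindings (here the `filter` was consumed immediately, but the comprehension makes the evaluation order explicit). It also swaps the `n is not name` identity test on strings, which only works by interning accident, for a proper `!=`. A sketch of the capture pitfall:

    callbacks = [lambda: i for i in range(3)]        # B023-style: all capture the same "i"
    print([cb() for cb in callbacks])                # [2, 2, 2]

    callbacks = [lambda i=i: i for i in range(3)]    # bind by value via a default argument
    print([cb() for cb in callbacks])                # [0, 1, 2]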
@@ -50,8 +50,8 @@ class HTTPServer:
     def bind(self, host="127.0.0.1", port=0):
         try:
             self.socket.bind((host or "", port))
-        except OSError as err:
-            raise OSError(err)
+        except OSError:
+            raise

         self.socket.listen(1)
         self.bound = True
@@ -65,27 +65,27 @@ class HTTPServer:
         try:
             conn, addr = self.socket.accept()
             conn.settimeout(None)
-        except socket.timeout:
-            raise OSError("Socket accept timed out")
+        except socket.timeout as err:
+            raise OSError("Socket accept timed out") from err

         try:
             req_data = conn.recv(1024)
-        except OSError:
-            raise OSError("Failed to read data from socket")
+        except OSError as err:
+            raise OSError("Failed to read data from socket") from err

         req = HTTPRequest(req_data)
         if req.command not in ("GET", "HEAD"):
             conn.send(b"HTTP/1.1 501 Not Implemented\r\n")
             conn.close()
-            raise OSError("Invalid request method: {0}".format(req.command))
+            raise OSError(f"Invalid request method: {req.command}")

         try:
             conn.send(b"HTTP/1.1 200 OK\r\n")
             conn.send(b"Server: Streamlink\r\n")
             conn.send(b"Content-Type: video/unknown\r\n")
             conn.send(b"\r\n")
-        except OSError:
-            raise OSError("Failed to write data to socket")
+        except OSError as err:
+            raise OSError("Failed to write data to socket") from err

         # We don't want to send any data on HEAD requests.
         if req.command == "HEAD":
@@ -121,7 +121,7 @@ class ProgressFormatter:
         length = 0
         # Get literal texts, static segments and variable segments from the parsed format
         # and calculate the overall length of the literal texts and static segments after substituting them.
-        for literal_text, field_name, format_spec, conversion in fmt:
+        for literal_text, field_name, format_spec, _conversion in fmt:
             static.append(literal_text)
             length += len(literal_text)
             if field_name is None:
@@ -226,7 +226,7 @@ class TestMixinStreamHLS(unittest.TestCase):
     def await_write(self, write_calls=1, timeout=TIMEOUT_AWAIT_WRITE) -> None:
         writer: EventedHLSStreamWriter = self.thread.reader.writer  # type: ignore[assignment]
         assert writer.is_alive()
-        for write_call in range(write_calls):
+        for _ in range(write_calls):
             assert writer.handshake.step(timeout)

         # make one read call on the read thread and wait until it has finished
@@ -45,7 +45,7 @@ class TestPlugin(Plugin):

         if "UnsortableStreamNames" in self.url:
             def gen():
-                for i in range(3):
+                for _ in range(3):
                     yield "vod", HTTPStream(self.session, "http://test.se/stream")

             return gen()
@@ -117,7 +117,8 @@ class PluginCanHandleUrl:
     # ---- tests

     def test_class_setup(self):
-        assert issubclass(getattr(self, "__plugin__"), Plugin), "Test has a __plugin__ that is a subclass of the Plugin class"
+        assert hasattr(self, "__plugin__"), "Test has a __plugin__ attribute"
+        assert issubclass(self.__plugin__, Plugin), "Test has a __plugin__ that is a subclass of the Plugin class"
         assert len(self.should_match) + len(self.should_match_groups) > 0, "Test has at least one positive URL"

     def test_class_name(self, classnames: Set[str]):
@@ -23,7 +23,7 @@ def _parse_xml(data, strip_ns=False):
         if len(snippet) > 35:
             snippet = f"{snippet[:35]} ..."

-        raise ValueError("Unable to parse XML: {0} ({1})".format(err, snippet))
+        raise ValueError(f"Unable to parse XML: {err} ({snippet})") from err


 @contextmanager
@@ -121,8 +121,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "en"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "en"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "en"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "en"

     def test_audio_multi_lang_alpha3(self, session: Streamlink, mpd: Mock):
         adaptationset = Mock(
@@ -139,8 +139,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "eng"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "eng"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "eng"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "eng"

     def test_audio_invalid_lang(self, session: Streamlink, mpd: Mock):
         adaptationset = Mock(
@@ -156,8 +156,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "en_no_voice"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "en_no_voice"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "en_no_voice"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "en_no_voice"

     def test_audio_multi_lang_locale(self, monkeypatch: pytest.MonkeyPatch, session: Streamlink, mpd: Mock):
         session.set_option("locale", "es_ES")
@@ -176,8 +176,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "es"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "es"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "es"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "es"

     # Verify the fix for https://github.com/streamlink/streamlink/issues/3365
     def test_duplicated_resolutions(self, session: Streamlink, mpd: Mock):
@@ -210,9 +210,9 @@ class TestDASHStreamParseManifest:

         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
-        assert getattr(streams["1080p"].video_representation, "bandwidth") == pytest.approx(128.0)
-        assert getattr(streams["1080p_alt"].video_representation, "bandwidth") == pytest.approx(64.0)
-        assert getattr(streams["1080p_alt2"].video_representation, "bandwidth") == pytest.approx(32.0)
+        assert getattr(streams["1080p"].video_representation, "bandwidth", None) == pytest.approx(128.0)
+        assert getattr(streams["1080p_alt"].video_representation, "bandwidth", None) == pytest.approx(64.0)
+        assert getattr(streams["1080p_alt2"].video_representation, "bandwidth", None) == pytest.approx(32.0)

     @pytest.mark.parametrize("adaptationset", [
         pytest.param(
@@ -450,7 +450,7 @@ class TestMPDParser(unittest.TestCase):
         assert mpd.get_representation((None, "0", "audio1")) is None
         assert mpd.get_representation(("period-0", None, "audio1")) is None

-        assert getattr(mpd.get_representation(("period-0", "0", "audio1")), "mimeType") == "audio/mp4"
-        assert getattr(mpd.get_representation(("period-0", "0", "audio2")), "mimeType") == "audio/mp4"
-        assert getattr(mpd.get_representation(("period-0", None, "video1")), "mimeType") == "video/mp4"
-        assert getattr(mpd.get_representation(("period-0", None, "video2")), "mimeType") == "video/mp4"
+        assert getattr(mpd.get_representation(("period-0", "0", "audio1")), "mimeType", None) == "audio/mp4"
+        assert getattr(mpd.get_representation(("period-0", "0", "audio2")), "mimeType", None) == "audio/mp4"
+        assert getattr(mpd.get_representation(("period-0", None, "video1")), "mimeType", None) == "video/mp4"
+        assert getattr(mpd.get_representation(("period-0", None, "video2")), "mimeType", None) == "video/mp4"
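In the tests, `getattr(obj, "name")` with a constant attribute and no default is B009 (get-attr-with-constant). Plain attribute access would satisfy the rule too, but these objects come from parsed manifests or `Mock`s whose attributes may be absent, so the commit adds an explicit `None` default, which keeps two-argument `getattr` from raising `AttributeError` before the assert can fail with a useful comparison. A standalone sketch:

    from unittest.mock import Mock

    rep = Mock(spec=[])                         # no attributes defined
    # getattr(rep, "lang")                      # B009, and raises AttributeError here
    assert getattr(rep, "lang", None) is None   # the comparison runs instead of blowing up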
@@ -447,7 +447,7 @@ class TestHlsPlaylistReloadTime(TestMixinStreamHLS, unittest.TestCase):
         }))

     def subject(self, *args, **kwargs):
-        thread, segments = super().subject(start=False, *args, **kwargs)
+        thread, segments = super().subject(*args, start=False, **kwargs)

         # mock the worker thread's _playlist_reload_time method, so that the main thread can wait on its call
         playlist_reload_time_called = Event()
@@ -16,8 +16,8 @@ def assert_validationerror(exception, expected):


 def test_text_is_str(recwarn: pytest.WarningsRecorder):
-    assert "text" not in getattr(validate, "__dict__")
-    assert "text" in getattr(validate, "__all__")
+    assert "text" not in getattr(validate, "__dict__", {})
+    assert "text" in getattr(validate, "__all__", [])
     assert validate.text is str, "Exports text as str alias for backwards compatiblity"
     assert [(record.category, str(record.message)) for record in recwarn.list] == [
         (
@@ -644,7 +644,7 @@ class TestAttrSchema:
     def obj(self):
         obj1 = self.Subject()
         obj2 = self.Subject()
-        setattr(obj1, "bar", obj2)
+        obj1.bar = obj2

         return obj1