mirror of https://github.com/streamlink/streamlink

chore: add "B" rules to ruff config

parent 90ccc1039c
commit 5e6f03c3cd
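Note: the hunks below apply ruff's flake8-bugbear ("B") rules across the codebase. The most frequent fix is B904: when re-raising inside an except block, chain the original exception with `raise ... from err` (or suppress it with `from None`) so the original cause and traceback are preserved. A minimal sketch of that pattern, not taken from the commit (names are illustrative):

def read_config(path: str) -> str:
    try:
        with open(path) as fh:
            return fh.read()
    except OSError as err:
        # chaining keeps the original OSError attached as __cause__
        raise RuntimeError(f"Could not read config file: {path}") from err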
@@ -217,7 +217,7 @@ class PluginFinder:
             return pluginmetadata
         except Exception as err:
-            raise ExtensionError(f"Error while parsing plugin file {pluginfile.name}", err)
+            raise ExtensionError(f"Error while parsing plugin file {pluginfile.name}", err) from err


 class PluginsDirective(Directive):
@@ -7,7 +7,7 @@ from docutils import nodes
 from sphinx.util.nodes import split_explicit_title


-def releaseref_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
+def releaseref_role(name, rawtext, text, lineno, inliner, options=None, content=None):
     config = inliner.document.settings.env.config
     text = text.replace("|version|", config.version)
     text = text.replace("|release|", config.release)
@@ -16,7 +16,7 @@ def releaseref_role(name, rawtext, text, lineno, inliner, options={}, content=[]
     if not has_explicit_title:
         title = os.path.basename(target)

-    node = nodes.reference(rawtext, title, refuri=target, **options)
+    node = nodes.reference(rawtext, title, refuri=target, **(options or {}))

     return [node], []
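The signature change above follows the usual fix for flake8-bugbear's mutable-default-argument rule (B006): a dict or list default is created once at definition time and shared between calls, so the convention is a None default resolved inside the body. A minimal sketch with assumed names:

def build_node(options=None, content=None):
    # resolve the None defaults inside the body so each call gets fresh objects
    options = options or {}
    content = content or []
    return options, content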
@@ -81,6 +81,8 @@ select = [
     "I",
     # flake8-builtins
     "A",
+    # flake8-bugbear
+    "B",
     # flake8-commas
     "COM",
     # flake8-comprehensions
@@ -100,6 +102,7 @@ select = [
 ]
 extend-ignore = [
     "A003", # builtin-attribute-shadowing
+    "B028", # no-explicit-stacklevel
     "C408", # unnecessary-collection-call
     "ISC003", # explicit-string-concatenation
     "PLC1901", # compare-to-empty-string
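For context, B028 (added to extend-ignore above) flags warnings.warn() calls that omit an explicit stacklevel. A minimal sketch of what the rule asks for, not taken from this repository:

import warnings

def old_api():
    # stacklevel=2 attributes the warning to the caller of old_api(), not to this line
    warnings.warn("old_api() is deprecated, use new_api()", DeprecationWarning, stacklevel=2)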
@@ -128,7 +128,7 @@ class Git:
                 ref,
             )
         except subprocess.CalledProcessError as err:
-            raise ValueError(f"Could not get tag from git:\n{err.stderr}")
+            raise ValueError(f"Could not get tag from git:\n{err.stderr}") from err

     @classmethod
     def shortlog(cls, start: str, end: str) -> str:
@@ -141,7 +141,7 @@ class Git:
                 f"{start}...{end}",
             )
         except subprocess.CalledProcessError as err:
-            raise ValueError(f"Could not get shortlog from git:\n{err.stderr}")
+            raise ValueError(f"Could not get shortlog from git:\n{err.stderr}") from err


 class GitHubAPI:
@@ -375,15 +375,15 @@ class Release:
         log.debug(f"Opening release template file: {self.template}")
         try:
             return self._read_file(self.template)
-        except IOError:
-            raise IOError("Missing release template file")
+        except OSError as err:
+            raise OSError("Missing release template file") from err

     def _read_changelog(self):
         log.debug(f"Opening changelog file: {self.changelog}")
         try:
             return self._read_file(self.changelog)
-        except IOError:
-            raise IOError("Missing changelog file")
+        except OSError as err:
+            raise OSError("Missing changelog file") from err

     def _get_changelog(self) -> dict:
         changelog = self._read_changelog()
@@ -444,7 +444,7 @@ class Release:
         return jinjatemplate.render(context)


-def main(args: object):
+def main(args: argparse.Namespace):
     # if no tag was provided, get the current tag from `git describe --tags`
     tag = args.tag or Git.tag()
     if not tag:
@@ -65,8 +65,8 @@ def main(api_key: str, file: Path):
         result: dict = data and data.get("result") or {}
         if result.get("code") != "success":
             raise ValueError(result.get("message") or "Missing version_data in JSON response")
-    except requests.exceptions.RequestException:
-        raise ValueError("Error while querying API or parsing JSON response")
+    except requests.exceptions.RequestException as err:
+        raise ValueError("Error while querying API or parsing JSON response") from err

     version_data: dict = data.get("version_data") or {}
     user_agents = {}
@@ -75,8 +75,8 @@ def main(api_key: str, file: Path):
         for item in seq:
             try:
                 obj = obj[item]
-            except KeyError:
-                raise ValueError(f"Invalid key: {item} ({seq})")
+            except KeyError as err:
+                raise ValueError(f"Invalid key: {item} ({seq})") from err

         if type(obj) is not str:
             raise ValueError(f"Invalid result: {obj!r} ({seq})")
@@ -152,11 +152,11 @@ class HTTPSession(Session):
         res = super().request(
             method,
             url,
+            *args,
             headers=headers,
             params=params,
             timeout=timeout,
             proxies=proxies,
-            *args,
             **kwargs,
         )
         if raise_for_status and res.status_code not in acceptable_status:
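The reordering above matches flake8-bugbear's B026, which discourages star-arg unpacking placed after keyword arguments in a call. A small sketch with assumed names:

def request(method, url, *args, headers=None, **kwargs):
    return method, url, args, headers, kwargs

# flagged by B026: positional unpacking after a keyword argument
# request("GET", "https://example.com", headers={}, *("extra",))
# preferred: unpack positional arguments before any keyword arguments
request("GET", "https://example.com", *("extra",), headers={})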
@@ -73,8 +73,8 @@ def _deprecations():
     __all__ = [k for k in globals().keys() if not k.startswith("_")]
     __all__.extend(deprecations.keys())

-    setattr(sys.modules[__name__], "__getattr__", __getattr__)
-    setattr(sys.modules[__name__], "__all__", __all__)
+    sys.modules[__name__].__getattr__ = __getattr__
+    sys.modules[__name__].__all__ = __all__


 _deprecations()
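The setattr replacements above (and the later getattr changes) correspond to flake8-bugbear's B010/B009: calling setattr or getattr with a constant attribute name is just a verbose attribute access. A tiny sketch:

class Namespace:
    pass

ns = Namespace()
setattr(ns, "value", 1)  # flagged by B010
ns.value = 1             # equivalent and preferred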
@@ -151,8 +151,8 @@ def _validate_pattern(schema: Pattern, value):

 @validate.register
 def _validate_allschema(schema: AllSchema, value):
-    for schema in schema.schema:
-        value = validate(schema, value)
+    for subschema in schema.schema:
+        value = validate(subschema, value)

     return value
@@ -173,8 +173,8 @@ def _validate_anyschema(schema: AnySchema, value):
 def _validate_noneorallschema(schema: NoneOrAllSchema, value):
     if value is not None:
         try:
-            for schema in schema.schema:
-                value = validate(schema, value)
+            for subschema in schema.schema:
+                value = validate(subschema, value)
         except ValidationError as err:
             raise ValidationError(err, schema=NoneOrAllSchema) from None
@@ -376,13 +376,13 @@ def validate_union(schema, value):
 @validate_union.register(dict)
 def _validate_union_dict(schema, value):
     new = type(schema)()
-    for key, schema in schema.items():
+    for key, subschema in schema.items():
         is_optional = isinstance(key, OptionalSchema)
         if is_optional:
             key = key.key

         try:
-            new[key] = validate(schema, value)
+            new[key] = validate(subschema, value)
         except ValidationError as err:
             if is_optional:
                 continue
@@ -123,9 +123,9 @@ def iterate_streams(streams):
     for name, stream in streams:
         if isinstance(stream, list):
             for sub_stream in stream:
-                yield (name, sub_stream)
+                yield name, sub_stream
         else:
-            yield (name, stream)
+            yield name, stream


 def stream_type_priority(stream_types, stream):
@@ -374,7 +374,7 @@ class Plugin:
     def default_stream_types(cls, streams):
         stream_types = ["hls", "http"]

-        for name, stream in iterate_streams(streams):
+        for _name, stream in iterate_streams(streams):
             stream_type = type(stream).shortname()

             if stream_type not in stream_types:
@@ -428,7 +428,7 @@ class Plugin:
         except NoStreamsError:
             return {}
         except (OSError, ValueError) as err:
-            raise PluginError(err)
+            raise PluginError(err) from err

         if not ostreams:
             return {}
@@ -482,7 +482,7 @@ class Plugin:

         # Create the best/worst synonyms
         def stream_weight_only(s):
-            return (self.stream_weight(s)[0] or (len(streams) == 1 and 1))
+            return self.stream_weight(s)[0] or (len(streams) == 1 and 1)

         stream_names = filter(stream_weight_only, streams.keys())
         sorted_streams = sorted(stream_names, key=stream_weight_only)
@@ -633,7 +633,7 @@ class Plugin:
             try:
                 return user_input_requester.ask(prompt)
             except OSError as err:
-                raise FatalPluginError(f"User input error: {err}")
+                raise FatalPluginError(f"User input error: {err}") from err
         raise FatalPluginError("This plugin requires user input, however it is not supported on this platform")

     def input_ask_password(self, prompt: str) -> str:
@@ -642,7 +642,7 @@ class Plugin:
             try:
                 return user_input_requester.ask_password(prompt)
             except OSError as err:
-                raise FatalPluginError(f"User input error: {err}")
+                raise FatalPluginError(f"User input error: {err}") from err
         raise FatalPluginError("This plugin requires user input, however it is not supported on this platform")
@@ -179,14 +179,14 @@ class AbemaTV(Plugin):
         h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
         h.update(self.SECRETKEY)
         tmp = h.digest()
-        for i in range(time_struct.tm_mon):
+        for _ in range(time_struct.tm_mon):
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
         h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
         h.update(urlsafe_b64encode(tmp).rstrip(b"=") + deviceid)
         tmp = h.digest()
-        for i in range(time_struct.tm_mday % 5):
+        for _ in range(time_struct.tm_mday % 5):
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
@@ -195,7 +195,7 @@ class AbemaTV(Plugin):
         h.update(urlsafe_b64encode(tmp).rstrip(b"=") + ts_1hour_str)
         tmp = h.digest()

-        for i in range(time_struct.tm_hour % 5):  # utc hour
+        for _ in range(time_struct.tm_hour % 5):  # utc hour
             h = hmac.new(self.SECRETKEY, digestmod=hashlib.sha256)
             h.update(tmp)
             tmp = h.digest()
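The loop-variable renames here and in the following plugin hunks follow B007, which flags loop control variables that are never used inside the loop body. A trivial sketch:

attempts = 0
for _ in range(3):  # the counter value itself is never read, so "_" signals that
    attempts += 1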
@@ -67,7 +67,7 @@ class ArteTV(Plugin):

         self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]

-        for slot, protocol, url in sorted(streams, key=itemgetter(0)):
+        for _slot, _protocol, url in sorted(streams, key=itemgetter(0)):
             return HLSStream.parse_variant_playlist(self.session, url)
@@ -134,9 +134,9 @@ class BBCiPlayer(Plugin):
             for connection in media["connection"]:
                 urls[connection.get("transferFormat")].add(connection["href"])

-        for stream_type, urls in urls.items():
-            log.debug(f"{len(urls)} {stream_type} streams")
-            for url in list(urls):
+        for stream_type, urlitems in urls.items():
+            log.debug(f"{len(urlitems)} {stream_type} streams")
+            for url in list(urlitems):
                 try:
                     if stream_type == "hls":
                         yield from HLSStream.parse_variant_playlist(self.session, url).items()
@@ -315,7 +315,7 @@ class Crunchyroll(Plugin):
             info = api.get_info(media_id, fields=["media.name", "media.series_name",
                                                   "media.media_type", "media.stream_data"], schema=_media_schema)
         except CrunchyrollAPIError as err:
-            raise PluginError(f"Media lookup error: {err.msg}")
+            raise PluginError(f"Media lookup error: {err.msg}") from err

         if not info:
             return
@@ -390,7 +390,7 @@ class Crunchyroll(Plugin):
             log.info(f"Logged in as '{login_name}'")

         except CrunchyrollAPIError as err:
-            raise PluginError(f"Authentication error: {err.msg}")
+            raise PluginError(f"Authentication error: {err.msg}") from err
         if not api.auth:
             log.warning("No authentication provided, you won't be able to access premium restricted content")
@@ -204,7 +204,7 @@ class Filmon(Plugin):
         self.session.http.get(self.url)

         if vod_id:
-            for quality, url, timeout in self.api.vod(vod_id):
+            for quality, url, _timeout in self.api.vod(vod_id):
                 if url.endswith(".m3u8"):
                     streams = HLSStream.parse_variant_playlist(self.session, url)
                     if streams:
@@ -234,7 +234,7 @@ class Filmon(Plugin):
             raise PluginError(f"Unable to find channel ID: {channel}")

         try:
-            for quality, url, timeout in self.api.channel(_id):
+            for quality, url, _timeout in self.api.channel(_id):
                 yield quality, FilmOnHLS(self.session, url, self.api, channel=_id, quality=quality)
         except Exception:
             if channel and not channel.isdigit():
@@ -7,7 +7,7 @@ $type live
 import logging
 import re

-from streamlink.exceptions import FatalPluginError, NoStreamsError, PluginError
+from streamlink.exceptions import FatalPluginError, NoStreamsError
 from streamlink.plugin import Plugin, pluginargument, pluginmatcher
 from streamlink.plugin.api import validate
 from streamlink.stream.hls import HLSStream
@@ -158,9 +158,11 @@ class Pixiv(Plugin):
                 # other co-hosts
                 self.set_option("performer", co_hosts[number - 1][0])
             except FatalPluginError:
-                raise PluginError("Selected performer is invalid.")
+                log.error("Selected performer is invalid.")
+                return
             except (IndexError, ValueError, TypeError):
-                raise PluginError("Input is invalid")
+                log.error("Input is invalid")
+                return

         # ignore the owner stream, if a performer is selected
         # or use it when there are no other performers
@@ -11,7 +11,7 @@ import logging
 import re
 from urllib.parse import urljoin

-from streamlink.plugin import Plugin, PluginError, pluginmatcher
+from streamlink.plugin import Plugin, pluginmatcher
 from streamlink.plugin.api import validate
 from streamlink.stream.hls import HLSStream

@@ -63,7 +63,8 @@ class TV4Play(Plugin):
             )
         except Exception as e:
             if "404 Client Error" in str(e):
-                raise PluginError("This Video is not available")
+                log.error("This Video is not available")
+                return
             raise e
         log.debug("Found metadata")
         metadata = self.session.http.json(res, schema=self._meta_schema)
@@ -82,7 +83,8 @@ class TV4Play(Plugin):
             res = self.session.http.get(urljoin(self.api_url, metadata["mediaUri"]))
         except Exception as e:
             if "401 Client Error" in str(e):
-                raise PluginError("This Video is not available in your country")
+                log.error("This Video is not available in your country")
+                return
             raise e

         log.debug("Found stream data")
@@ -638,7 +638,7 @@ class Twitch(Plugin):
                 raise PluginError
             sig, token = data
         except (PluginError, TypeError):
-            raise NoStreamsError
+            raise NoStreamsError  # noqa: B904

         try:
             restricted_bitrates = self.api.parse_token(token)
@@ -699,7 +699,7 @@ class Twitch(Plugin):
             if "404 Client Error" in err or "Failed to parse playlist" in err:
                 return
             else:
-                raise PluginError(err)
+                raise PluginError(err) from err

         for name in restricted_bitrates:
             if name not in streams:
@@ -1,5 +1,5 @@
 """
-$description Russian live streaming social platform.
+$description Russian live-streaming social platform.
 $url wasd.tv
 $type live
 """
@@ -75,11 +75,11 @@ class WASD(Plugin):
         for stream in json_res["media_container_streams"]:
             log.debug("media_container_status: {0}, media_container_online_status: {1}".format(
                 json_res["media_container_status"], json_res["media_container_online_status"]))
-            for stream in stream["stream_media"]:
-                if stream["media_status"] == "STOPPED":
-                    hls_url = stream["media_meta"]["media_archive_url"]
-                elif stream["media_status"] == "RUNNING":
-                    hls_url = stream["media_meta"]["media_url"]
+            for stream_media in stream["stream_media"]:
+                if stream_media["media_status"] == "STOPPED":
+                    hls_url = stream_media["media_meta"]["media_archive_url"]
+                else:
+                    hls_url = stream_media["media_meta"]["media_url"]

                 yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
@@ -90,7 +90,7 @@ class WWENetwork(Plugin):
         return self.auth_token

     @property  # type: ignore
-    @lru_cache(maxsize=128)
+    @lru_cache(maxsize=128)  # noqa: B019
     def item_config(self):
         log.debug("Loading page config")
         p = urlparse(self.url)
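The # noqa: B019 markers above and below silence flake8-bugbear's warning about functools.lru_cache on methods: the cache holds a reference to self, so decorated instances can stay alive for the lifetime of the process. A minimal sketch, not the project's code:

from functools import lru_cache

class PageConfig:
    @lru_cache(maxsize=128)  # noqa: B019 -- the cache keeps `self` alive until it is cleared
    def load(self, url: str) -> str:
        return f"config for {url}"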
@@ -224,7 +224,7 @@ class YouTube(Plugin):
         best_audio_itag = None

         # Extract audio streams from the adaptive format list
-        for url, label, itag, mimeType in adaptive_formats:
+        for url, _label, itag, mimeType in adaptive_formats:
             if url is None:
                 continue
@@ -511,7 +511,7 @@ class Streamlink:
         plugincls = self.plugins[plugin]
         return plugincls.get_option(key)

-    @lru_cache(maxsize=128)
+    @lru_cache(maxsize=128)  # noqa: B019
     def resolve_url(
         self,
         url: str,
@@ -614,7 +614,7 @@ class Streamlink:
         """

         success = False
-        for loader, name, ispkg in pkgutil.iter_modules([path]):
+        for _loader, name, _ispkg in pkgutil.iter_modules([path]):
             # set the full plugin module name
             # use the "streamlink.plugins." prefix even for sideloaded plugins
             module_name = f"streamlink.plugins.{name}"
@@ -951,7 +951,7 @@ class SegmentTimeline(MPDNode):
             if t == 0 and tsegment.t is not None:
                 t = tsegment.t
                 # check the start time from MPD
-            for repeated_i in range(tsegment.r + 1):
+            for _ in range(tsegment.r + 1):
                 yield TimelineSegment(t, tsegment.d)
                 t += tsegment.d
@@ -52,7 +52,7 @@ class MuxedStream(Stream):
         maps = self.options.get("maps", [])
         # only update the maps values if they haven't been set
         update_maps = not maps
-        for i, substream in enumerate(self.substreams):
+        for substream in self.substreams:
             log.debug("Opening {0} substream".format(substream.shortname()))
             if update_maps:
                 maps.append(len(fds))
@@ -330,7 +330,7 @@ class HLSStreamWorker(SegmentedStreamWorker):
         try:
             playlist = self._reload_playlist(res)
         except ValueError as err:
-            raise StreamError(err)
+            raise StreamError(err) from err

         if playlist.is_master:
             raise StreamError(f"Attempted to play a variant playlist, use 'hls://{self.stream.url}' instead")
@@ -656,7 +656,7 @@ class HLSStream(HTTPStream):
         try:
             multivariant = cls._get_variant_playlist(res)
         except ValueError as err:
-            raise OSError(f"Failed to parse playlist: {err}")
+            raise OSError(f"Failed to parse playlist: {err}") from err

         stream_name: Optional[str]
         stream: Union["HLSStream", "MuxedHLSStream"]
@@ -193,7 +193,7 @@ class M3U8Parser:
         if "_TAGS" in self.__class__.__dict__:
             return
         tags = {}
-        setattr(self.__class__, "_TAGS", tags)
+        self.__class__._TAGS = tags
         for name, method in inspect.getmembers(self.__class__, inspect.isfunction):
             if not name.startswith("parse_tag_"):
                 continue
@@ -46,7 +46,7 @@ class Formatter:
     def _format(self, string: str, mapper: Callable[[str], str], defaults: Dict[str, str]) -> str:
         result = []

-        for literal_text, field_name, format_spec, conversion in _stringformatter.parse(string):
+        for literal_text, field_name, format_spec, _conversion in _stringformatter.parse(string):
             if literal_text:
                 result.append(literal_text)
@@ -31,8 +31,8 @@ class Country:
                 c.name,
                 getattr(c, "official_name", c.name),
             )
-        except (LookupError, KeyError):
-            raise LookupError(f"Invalid country code: {country}")
+        except LookupError as err:
+            raise LookupError(f"Invalid country code: {country}") from err

     def __eq__(self, other):
         return (
@@ -76,8 +76,8 @@ class Language:
                 lang.name,
                 getattr(lang, "bibliographic", ""),
             )
-        except (LookupError, KeyError):
-            raise LookupError(f"Invalid language code: {language}")
+        except LookupError as err:
+            raise LookupError(f"Invalid language code: {language}") from err

     def __eq__(self, other):
         return (
@@ -15,7 +15,7 @@ def _parse(parser, data, name, exception, schema, *args, **kwargs):
         if len(snippet) > 35:
             snippet = f"{snippet[:35]} ..."

-        raise exception(f"Unable to parse {name}: {err} ({snippet})")
+        raise exception(f"Unable to parse {name}: {err} ({snippet})")  # noqa: B904

     if schema:
         parsed = schema.validate(parsed, name=name, exception=exception)
@@ -313,7 +313,7 @@ def open_stream(stream):
     try:
         stream_fd = stream.open()
     except StreamError as err:
-        raise StreamError(f"Could not open stream: {err}")
+        raise StreamError(f"Could not open stream: {err}") from err

     # Read 8192 bytes before proceeding to check for errors.
     # This is to avoid opening the output unnecessarily.
@@ -322,7 +322,7 @@ def open_stream(stream):
         prebuffer = stream_fd.read(8192)
     except OSError as err:
         stream_fd.close()
-        raise StreamError(f"Failed to read data from stream: {err}")
+        raise StreamError(f"Failed to read data from stream: {err}") from err

     if not prebuffer:
         stream_fd.close()
@@ -412,12 +412,12 @@ def handle_stream(plugin: Plugin, streams: Dict[str, Stream], stream_name: str)

     formatter = get_formatter(plugin)

-    for stream_name in [stream_name] + alt_streams:
-        stream = streams[stream_name]
+    for name in [stream_name] + alt_streams:
+        stream = streams[name]
         stream_type = type(stream).shortname()

         if stream_type in args.player_passthrough and not file_output:
-            log.info(f"Opening stream: {stream_name} ({stream_type})")
+            log.info(f"Opening stream: {name} ({stream_type})")
             success = output_stream_passthrough(stream, formatter)
         elif args.player_external_http:
             return output_stream_http(
@@ -431,7 +431,7 @@ def handle_stream(plugin: Plugin, streams: Dict[str, Stream], stream_name: str)
         elif args.player_continuous_http and not file_output:
             return output_stream_http(plugin, streams, formatter)
         else:
-            log.info(f"Opening stream: {stream_name} ({stream_type})")
+            log.info(f"Opening stream: {name} ({stream_type})")
             success = output_stream(stream, formatter)

         if success:
@@ -502,17 +502,13 @@ def format_valid_streams(plugin: Plugin, streams: Dict[str, Stream]) -> str:
     delimiter = ", "
     validstreams = []

-    for name, stream in sorted(streams.items(),
-                               key=lambda stream: plugin.stream_weight(stream[0])):
+    for name, stream in sorted(streams.items(), key=lambda s: plugin.stream_weight(s[0])):
         if name in STREAM_SYNONYMS:
             continue

-        def synonymfilter(n):
-            return stream is streams[n] and n is not name
+        synonyms = [key for key, value in streams.items() if stream is value and key != name]

-        synonyms = list(filter(synonymfilter, streams.keys()))
-
-        if len(synonyms) > 0:
+        if synonyms:
             joined = delimiter.join(synonyms)
             name = f"{name} ({joined})"
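Replacing the nested synonymfilter() definition with a comprehension also sidesteps the closure-in-loop pitfall that flake8-bugbear reports as B023: a function defined inside a loop captures loop variables by reference, not by value. A small illustration:

makers = [lambda: i for i in range(3)]    # every lambda sees the final value of i
safe = [lambda i=i: i for i in range(3)]  # binding i as a default captures each value
print([f() for f in makers])  # [2, 2, 2]
print([f() for f in safe])    # [0, 1, 2]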
@@ -50,8 +50,8 @@ class HTTPServer:
     def bind(self, host="127.0.0.1", port=0):
         try:
             self.socket.bind((host or "", port))
-        except OSError as err:
-            raise OSError(err)
+        except OSError:
+            raise

         self.socket.listen(1)
         self.bound = True
@@ -65,27 +65,27 @@ class HTTPServer:
         try:
             conn, addr = self.socket.accept()
             conn.settimeout(None)
-        except socket.timeout:
-            raise OSError("Socket accept timed out")
+        except socket.timeout as err:
+            raise OSError("Socket accept timed out") from err

         try:
             req_data = conn.recv(1024)
-        except OSError:
-            raise OSError("Failed to read data from socket")
+        except OSError as err:
+            raise OSError("Failed to read data from socket") from err

         req = HTTPRequest(req_data)
         if req.command not in ("GET", "HEAD"):
             conn.send(b"HTTP/1.1 501 Not Implemented\r\n")
             conn.close()
-            raise OSError("Invalid request method: {0}".format(req.command))
+            raise OSError(f"Invalid request method: {req.command}")

         try:
             conn.send(b"HTTP/1.1 200 OK\r\n")
             conn.send(b"Server: Streamlink\r\n")
             conn.send(b"Content-Type: video/unknown\r\n")
             conn.send(b"\r\n")
-        except OSError:
-            raise OSError("Failed to write data to socket")
+        except OSError as err:
+            raise OSError("Failed to write data to socket") from err

         # We don't want to send any data on HEAD requests.
         if req.command == "HEAD":
@@ -121,7 +121,7 @@ class ProgressFormatter:
         length = 0
         # Get literal texts, static segments and variable segments from the parsed format
         # and calculate the overall length of the literal texts and static segments after substituting them.
-        for literal_text, field_name, format_spec, conversion in fmt:
+        for literal_text, field_name, format_spec, _conversion in fmt:
             static.append(literal_text)
             length += len(literal_text)
             if field_name is None:
@@ -226,7 +226,7 @@ class TestMixinStreamHLS(unittest.TestCase):
     def await_write(self, write_calls=1, timeout=TIMEOUT_AWAIT_WRITE) -> None:
         writer: EventedHLSStreamWriter = self.thread.reader.writer  # type: ignore[assignment]
         assert writer.is_alive()
-        for write_call in range(write_calls):
+        for _ in range(write_calls):
             assert writer.handshake.step(timeout)

         # make one read call on the read thread and wait until it has finished
@@ -45,7 +45,7 @@ class TestPlugin(Plugin):

         if "UnsortableStreamNames" in self.url:
             def gen():
-                for i in range(3):
+                for _ in range(3):
                     yield "vod", HTTPStream(self.session, "http://test.se/stream")

             return gen()
@@ -117,7 +117,8 @@ class PluginCanHandleUrl:
     # ---- tests

     def test_class_setup(self):
-        assert issubclass(getattr(self, "__plugin__"), Plugin), "Test has a __plugin__ that is a subclass of the Plugin class"
+        assert hasattr(self, "__plugin__"), "Test has a __plugin__ attribute"
+        assert issubclass(self.__plugin__, Plugin), "Test has a __plugin__ that is a subclass of the Plugin class"
         assert len(self.should_match) + len(self.should_match_groups) > 0, "Test has at least one positive URL"

     def test_class_name(self, classnames: Set[str]):
@@ -23,7 +23,7 @@ def _parse_xml(data, strip_ns=False):
         if len(snippet) > 35:
             snippet = f"{snippet[:35]} ..."

-        raise ValueError("Unable to parse XML: {0} ({1})".format(err, snippet))
+        raise ValueError(f"Unable to parse XML: {err} ({snippet})") from err


 @contextmanager
@@ -121,8 +121,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "en"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "en"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "en"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "en"

     def test_audio_multi_lang_alpha3(self, session: Streamlink, mpd: Mock):
         adaptationset = Mock(
@@ -139,8 +139,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "eng"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "eng"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "eng"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "eng"

     def test_audio_invalid_lang(self, session: Streamlink, mpd: Mock):
         adaptationset = Mock(
@@ -156,8 +156,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "en_no_voice"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "en_no_voice"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "en_no_voice"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "en_no_voice"

     def test_audio_multi_lang_locale(self, monkeypatch: pytest.MonkeyPatch, session: Streamlink, mpd: Mock):
         session.set_option("locale", "es_ES")
@@ -176,8 +176,8 @@ class TestDASHStreamParseManifest:
         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
         assert sorted(streams.keys()) == sorted(["720p", "1080p"])
-        assert getattr(streams["720p"].audio_representation, "lang") == "es"
-        assert getattr(streams["1080p"].audio_representation, "lang") == "es"
+        assert getattr(streams["720p"].audio_representation, "lang", None) == "es"
+        assert getattr(streams["1080p"].audio_representation, "lang", None) == "es"

     # Verify the fix for https://github.com/streamlink/streamlink/issues/3365
     def test_duplicated_resolutions(self, session: Streamlink, mpd: Mock):
@@ -210,9 +210,9 @@ class TestDASHStreamParseManifest:

         streams = DASHStream.parse_manifest(session, "http://test/manifest.mpd")
         assert mpd.call_args_list == [call(ANY, url="http://test/manifest.mpd", base_url="http://test")]
-        assert getattr(streams["1080p"].video_representation, "bandwidth") == pytest.approx(128.0)
-        assert getattr(streams["1080p_alt"].video_representation, "bandwidth") == pytest.approx(64.0)
-        assert getattr(streams["1080p_alt2"].video_representation, "bandwidth") == pytest.approx(32.0)
+        assert getattr(streams["1080p"].video_representation, "bandwidth", None) == pytest.approx(128.0)
+        assert getattr(streams["1080p_alt"].video_representation, "bandwidth", None) == pytest.approx(64.0)
+        assert getattr(streams["1080p_alt2"].video_representation, "bandwidth", None) == pytest.approx(32.0)

     @pytest.mark.parametrize("adaptationset", [
         pytest.param(
@@ -450,7 +450,7 @@ class TestMPDParser(unittest.TestCase):
         assert mpd.get_representation((None, "0", "audio1")) is None
         assert mpd.get_representation(("period-0", None, "audio1")) is None

-        assert getattr(mpd.get_representation(("period-0", "0", "audio1")), "mimeType") == "audio/mp4"
-        assert getattr(mpd.get_representation(("period-0", "0", "audio2")), "mimeType") == "audio/mp4"
-        assert getattr(mpd.get_representation(("period-0", None, "video1")), "mimeType") == "video/mp4"
-        assert getattr(mpd.get_representation(("period-0", None, "video2")), "mimeType") == "video/mp4"
+        assert getattr(mpd.get_representation(("period-0", "0", "audio1")), "mimeType", None) == "audio/mp4"
+        assert getattr(mpd.get_representation(("period-0", "0", "audio2")), "mimeType", None) == "audio/mp4"
+        assert getattr(mpd.get_representation(("period-0", None, "video1")), "mimeType", None) == "video/mp4"
+        assert getattr(mpd.get_representation(("period-0", None, "video2")), "mimeType", None) == "video/mp4"
@@ -447,7 +447,7 @@ class TestHlsPlaylistReloadTime(TestMixinStreamHLS, unittest.TestCase):
         }))

     def subject(self, *args, **kwargs):
-        thread, segments = super().subject(start=False, *args, **kwargs)
+        thread, segments = super().subject(*args, start=False, **kwargs)

         # mock the worker thread's _playlist_reload_time method, so that the main thread can wait on its call
         playlist_reload_time_called = Event()
@@ -16,8 +16,8 @@ def assert_validationerror(exception, expected):


 def test_text_is_str(recwarn: pytest.WarningsRecorder):
-    assert "text" not in getattr(validate, "__dict__")
-    assert "text" in getattr(validate, "__all__")
+    assert "text" not in getattr(validate, "__dict__", {})
+    assert "text" in getattr(validate, "__all__", [])
     assert validate.text is str, "Exports text as str alias for backwards compatiblity"
     assert [(record.category, str(record.message)) for record in recwarn.list] == [
         (
@@ -644,7 +644,7 @@ class TestAttrSchema:
     def obj(self):
         obj1 = self.Subject()
         obj2 = self.Subject()
-        setattr(obj1, "bar", obj2)
+        obj1.bar = obj2

         return obj1