# Source code for archivebox.extractors.title

__package__ = 'archivebox.extractors'

import re
from typing import Optional

from ..index.schema import Link, ArchiveResult, ArchiveOutput, ArchiveError
from ..util import (
    enforce_types,
    is_static_file,
    download_url,
    htmldecode,
)
from ..config import (
    TIMEOUT,
    SAVE_TITLE,
    CURL_BINARY,
    CURL_VERSION,
)
from ..cli.logging import TimedProgress


# Regex used to pull the human-readable text out of an HTML <title> tag.
# NOTE: the capture group is (.[^<>]*) rather than the original (.[^<>]+),
# which required at least two characters and therefore silently failed to
# match single-character titles like <title>A</title>.
HTML_TITLE_REGEX = re.compile(
    r'<title.*?>'                      # start matching text after <title> tag
    r'(.[^<>]*)',                      # capture everything up to the next < or >
    re.IGNORECASE | re.MULTILINE | re.DOTALL | re.UNICODE,
)


@enforce_types
def should_save_title(link: Link, out_dir: Optional[str]=None) -> bool:
    """Decide whether the title extractor should run for the given link."""
    # a title that doesn't look like a bare URL is considered already valid,
    # so there is nothing left for this extractor to do
    has_real_title = bool(link.title) and not link.title.lower().startswith('http')
    if has_real_title or is_static_file(link.url):
        return False

    return SAVE_TITLE
@enforce_types
def save_title(link: Link, out_dir: Optional[str]=None, timeout: int=TIMEOUT) -> ArchiveResult:
    """try to guess the page's title from its content"""

    # shown to the user as the "command" that produced the title; the actual
    # fetch below goes through download_url, not through curl/grep
    cmd = [
        CURL_BINARY,
        link.url,
        '|',
        'grep',
        '<title',
    ]
    output: ArchiveOutput = None
    status = 'succeeded'
    timer = TimedProgress(timeout, prefix=' ')
    try:
        html = download_url(link.url, timeout=timeout)
        match = HTML_TITLE_REGEX.search(html)
        if match:
            output = htmldecode(match.group(1).strip())
        # empty/whitespace-only titles count as a failure too
        if not output:
            raise ArchiveError('Unable to detect page title')
    except Exception as err:
        status = 'failed'
        output = err
    finally:
        timer.end()

    return ArchiveResult(
        cmd=cmd,
        pwd=out_dir,
        cmd_version=CURL_VERSION,
        output=output,
        status=status,
        **timer.stats,
    )