#!/usr/bin/env nix-shell
#! nix-shell -i "python3 -I" -p python3

from contextlib import contextmanager
from pathlib import Path
from typing import Iterable, Optional
from urllib import request

import hashlib, json


def getMetadata(apiKey: str, family: str = "Noto Emoji"):
    '''Fetch the Google Fonts metadata for a given family.

    An API key can be obtained by anyone with a Google account (🚮) from
      `https://developers.google.com/fonts/docs/developer_api#APIKey`
    '''
    from urllib.parse import urlencode

    with request.urlopen(
            "https://www.googleapis.com/webfonts/v1/webfonts?" +
            urlencode({ 'key': apiKey, 'family': family })
    ) as req:
        return json.load(req)
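# Hypothetical usage, assuming a valid key in the `GOOGLE_FONTS_TOKEN`
# environment variable (the response shape is sketched from how the
# metadata is consumed below):
#   metadata = getMetadata(environ['GOOGLE_FONTS_TOKEN'])
#   metadata['items'][0]['family']        # 'Noto Emoji'
#   metadata['items'][0]['lastModified']  # e.g. '2023-05-02'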

def getUrls(metadata) -> Iterable[str]:
    '''Extract all font files' URLs from Google Fonts metadata.

    The metadata must obey the API v1 schema, and can be obtained from:
      https://www.googleapis.com/webfonts/v1/webfonts?key=${GOOGLE_FONTS_TOKEN}&family=${FAMILY}
    '''
    return ( url for i in metadata['items'] for url in i['files'].values() )
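# Shape sketch (abridged from the API v1 schema): each item carries a
# `files` dict mapping variant names to URLs, so for a metadata value like
#   { 'items': [ { 'files': { 'regular': 'https://fonts.gstatic.com/…' } } ] }
# this yields just 'https://fonts.gstatic.com/…'.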


def hashUrl(url: str, *, hash: str = 'sha256'):
    '''Compute the hash of the data fetched by an HTTP GET of the given `url`.

    The `hash` must be an algorithm name `hashlib.new` accepts.
    '''
    with request.urlopen(url) as req:
        return hashlib.new(hash, req.read())
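# For example (performs a real HTTP GET; the URL is illustrative):
#   h = hashUrl('https://example.com/')  # SHA-256 by default
#   h.name         # 'sha256'
#   h.hexdigest()  # hex digest of the response body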


def sriEncode(h) -> str:
    '''Encode a hash in the SRI format.

    Takes a `hashlib` hash object, and produces a string that
    nixpkgs' `fetchurl` accepts as its `hash` parameter.
    '''
    from base64 import b64encode
    return f"{h.name}-{b64encode(h.digest()).decode()}"

def validateSRI(sri: Optional[str]) -> Optional[str]:
    '''Decode an SRI hash, return `None` if invalid.

    This is not a full SRI hash parser; hash options aren't supported.
    '''
    from base64 import b64decode

    if sri is None:
        return None

    try:
        hashName, b64 = sri.split('-', 1)

        h = hashlib.new(hashName)
        digest = b64decode(b64, validate=True)
        assert len(digest) == h.digest_size

    except (ValueError, AssertionError):
        # Covers unknown algorithms, malformed base64 (`binascii.Error`
        # is a `ValueError` subclass), and digest-length mismatches.
        return None
    else:
        return sri
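# Sanity checks (the first value is the empty-string hash from above):
#   validateSRI('sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=')  # returned unchanged
#   validateSRI('sha256-AAAA')  # None: decoded digest is 3 bytes, not 32
#   validateSRI('foo-bar')      # None: 'foo' is not a hashlib algorithm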


def hashUrls(
    urls: Iterable[str],
    knownHashes: dict[str, str] = {},
) -> dict[str, str]:
    '''Generate a `dict` mapping URLs to SRI-encoded hashes.

    The `knownHashes` optional parameter can be used to avoid
    re-downloading files whose URLs have not changed.
    '''
    return {
        url: validateSRI(knownHashes.get(url)) or sriEncode(hashUrl(url))
        for url in urls
    }
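# E.g. with one URL already hashed (hypothetical URLs), only the second
# one is downloaded; the first hash is reused verbatim:
#   hashUrls(
#       ['https://a.example/x.ttf', 'https://b.example/y.ttf'],
#       knownHashes = { 'https://a.example/x.ttf':
#                       'sha256-47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=' },
#   )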


@contextmanager
def atomicFileUpdate(target: Path):
    '''Atomically replace the contents of a file.

    Yields an open file to write into; upon exiting the context,
    the file is closed and (atomically) replaces the `target`.

    Guarantees that the `target` is either successfully replaced with
    the new contents, or left untouched while the temporary file is
    cleaned up.
    '''
    from tempfile import mkstemp
    fd, _p = mkstemp(
        dir = target.parent,
        prefix = target.name,
    )
    tmpPath = Path(_p)

    try:
        with open(fd, 'w') as f:
            yield f

        tmpPath.replace(target)

    except Exception:
        tmpPath.unlink(missing_ok = True)
        raise
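# Usage sketch: readers of `target` never observe a half-written file,
# since the temporary file only replaces it after a successful write:
#   with atomicFileUpdate(Path('out.json')) as f:
#       json.dump({'hello': 'world'}, f)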


if __name__ == "__main__":
    from os import environ
    from urllib.error import HTTPError

    environVar = 'GOOGLE_FONTS_TOKEN'
    currentDir = Path(__file__).parent
    metadataPath = currentDir / 'noto-emoji.json'

    try:
        apiToken = environ[environVar]
        metadata = getMetadata(apiToken)

    except (KeyError, HTTPError) as exn:
        # No API key in the environment, or the query was rejected.
        match exn:
            case KeyError() if exn.args[0] == environVar:
                print(f"No '{environVar}' in the environment, "
                       "skipping metadata update")

            case HTTPError() if exn.getcode() == 403:
                print("Got HTTP 403 (Forbidden)")
                if apiToken != '':
                    print("Your Google API key appears to be valid "
                          "but does not grant access to the fonts API.")
                    print("Aborting!")
                    raise SystemExit(1)

            case HTTPError() if exn.getcode() == 400:
                # Printing the supposed token should be fine, as this is
                #  what the API returns on invalid tokens.
                print(f"Got HTTP 400 (Bad Request), is this really an API token: '{apiToken}' ?")
            case _:
                # Unknown error, let's bubble it up
                raise

        # Either way, fall back to the metadata already on disk
        with metadataPath.open() as metadataFile:
            metadata = json.load(metadataFile)

        lastModified = metadata["items"][0]["lastModified"]
        print(f"Using metadata from file, last modified {lastModified}")

    else:
        # If metadata was successfully fetched, validate and persist it
        lastModified = metadata["items"][0]["lastModified"]
        print(f"Fetched current metadata, last modified {lastModified}")
        with atomicFileUpdate(metadataPath) as metadataFile:
            json.dump(metadata, metadataFile, indent = 2)
            metadataFile.write("\n")  # Pacify nixpkgs' dumb editor config check

    hashPath = currentDir / 'noto-emoji.hashes.json'
    try:
        with hashPath.open() as hashFile:
            hashes = json.load(hashFile)
    except FileNotFoundError:
        hashes = {}

    with atomicFileUpdate(hashPath) as hashFile:
        json.dump(
            hashUrls(getUrls(metadata), knownHashes = hashes),
            hashFile,
            indent = 2,
        )
        hashFile.write("\n")  # Pacify nixpkgs' dumb editor config check