diff options
| author | Jason Gross | 2020-05-19 15:31:50 -0400 |
|---|---|---|
| committer | Jason Gross | 2020-05-20 15:18:09 -0400 |
| commit | 8263c13919ca5a9ea59bc7cb930117215ddec3a9 (patch) | |
| tree | 3d0659f65bf6a8369a2566087409b8933f1a9934 /dev/tools | |
| parent | 19a231af66b275e83989e947b373cd44a889a319 (diff) | |
[merge-pr] Use a simpler method to get all pages
h/t SkySkimmer at
https://github.com/coq/coq/pull/12316#issuecomment-630952659
Diffstat (limited to 'dev/tools')
| -rwxr-xr-x | dev/tools/merge-pr.sh | 39 |
1 file changed, 11 insertions, 28 deletions
diff --git a/dev/tools/merge-pr.sh b/dev/tools/merge-pr.sh index ac4aafce62..82e4bd1e1e 100755 --- a/dev/tools/merge-pr.sh +++ b/dev/tools/merge-pr.sh @@ -49,35 +49,19 @@ ask_confirmation() { fi } -curl_paginate() { +curl_paginate_array() { # as per https://developer.github.com/v3/guides/traversing-with-pagination/#changing-the-number-of-items-received, GitHub will never give us more than 100 url="$1?per_page=100" - # We need to process the header to get the pagination. We have two - # options: - # - # 1. We can make an extra API call at the beginning to get the total - # number of pages, search for a rel="last" link, and then loop - # over all the pages. - # - # 2. We can ask for the header info with every single curl request, - # search for a rel="next" link to follow to the next page, and - # then parse out the body from the header. - # - # Although (1) is a bit simpler, we choose to do (2) to save an - # extra API call per invocation of curl. - while [ ! -z "${url}" ]; do - response="$(curl -si "${url}")" - # we search for something like 'link: <https://api.github.com/repositories/1377159/pulls/12129/reviews?page=2>; rel="next", <https://api.github.com/repositories/1377159/pulls/12129/reviews?page=2>; rel="last"' and take the first 'next' url - url="$(echo "${response}" | grep -m 1 -io '^link: .*>; rel="next"' | grep -o '<[^>]*>; rel="next"' | grep -o '<[^>]*>' | sed s'/[<>]//g')" - # we extract the body by looking for the first line that starts - # with [ or {, using the perl-regexp mode for grep as in - # https://stackoverflow.com/a/7167115/377022 - echo "${response}" | grep -Pzao '(?s)\n\s*[\[{].*' | tr '\0' '\n' - done -} - -curl_paginate_array() { - curl_paginate "$@" | jq '[.[]]' # we concatenate the arrays + # we keep fetching pages until the response is below the per-page limit (possibly 0 elements) + page=1 + while true; do + response="$(curl -s "${url}&page=${page}")" + echo "${response}" + if [ "$(jq 'length' <<< "$response")" -lt 100 ]; then # done 
+ break + fi + page=$(($page + 1)) + done | jq '[.[]]' # we concatenate the arrays } check_util jq @@ -85,7 +69,6 @@ check_util curl check_util git check_util gpg check_util grep -check_util tr # command line parsing |
