From 8263c13919ca5a9ea59bc7cb930117215ddec3a9 Mon Sep 17 00:00:00 2001
From: Jason Gross
Date: Tue, 19 May 2020 15:31:50 -0400
Subject: [merge-pr] Use a simpler method to get all pages

h/t SkySkimmer at
https://github.com/coq/coq/pull/12316#issuecomment-630952659
---
 dev/tools/merge-pr.sh | 39 +++++++++++----------------------------
 1 file changed, 11 insertions(+), 28 deletions(-)

diff --git a/dev/tools/merge-pr.sh b/dev/tools/merge-pr.sh
index ac4aafce62..82e4bd1e1e 100755
--- a/dev/tools/merge-pr.sh
+++ b/dev/tools/merge-pr.sh
@@ -49,35 +49,19 @@ ask_confirmation() {
   fi
 }
 
-curl_paginate() {
+curl_paginate_array() {
   # as per https://developer.github.com/v3/guides/traversing-with-pagination/#changing-the-number-of-items-received, GitHub will never give us more than 100
   url="$1?per_page=100"
-  # We need to process the header to get the pagination.  We have two
-  # options:
-  #
-  # 1. We can make an extra API call at the beginning to get the total
-  #    number of pages, search for a rel="last" link, and then loop
-  #    over all the pages.
-  #
-  # 2. We can ask for the header info with every single curl request,
-  #    search for a rel="next" link to follow to the next page, and
-  #    then parse out the body from the header.
-  #
-  # Although (1) is a bit simpler, we choose to do (2) to save an
-  # extra API call per invocation of curl.
-  while [ ! -z "${url}" ]; do
-    response="$(curl -si "${url}")"
-    # we search for something like 'link: <...>; rel="next", <...>; rel="last"' and take the first 'next' url
-    url="$(echo "${response}" | grep -m 1 -io '^link: .*>; rel="next"' | grep -o '<[^>]*>; rel="next"' | grep -o '<[^>]*>' | sed s'/[<>]//g')"
-    # we extract the body by looking for the first line that starts
-    # with [ or {, using the perl-regexp mode for grep as in
-    # https://stackoverflow.com/a/7167115/377022
-    echo "${response}" | grep -Pzao '(?s)\n\s*[\[{].*' | tr '\0' '\n'
-  done
-}
-
-curl_paginate_array() {
-  curl_paginate "$@" | jq '[.[]]' # we concatenate the arrays
+  # we keep fetching pages until the response is below the per-page limit (possibly 0 elements)
+  page=1
+  while true; do
+    response="$(curl -s "${url}&page=${page}")"
+    echo "${response}"
+    if [ "$(jq 'length' <<< "$response")" -lt 100 ]; then # done
+      break
+    fi
+    page=$(($page + 1))
+  done | jq '[.[]]' # we concatenate the arrays
 }
 
 check_util jq
@@ -85,7 +69,6 @@ check_util curl
 check_util git
 check_util gpg
 check_util grep
-check_util tr
 
 # command line parsing
 
-- 
cgit v1.2.3
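
For reference, a minimal usage sketch of the new curl_paginate_array helper, assuming the function above is loaded in a shell with curl and jq on the PATH; the endpoint below is the standard GitHub REST route for listing a pull request's commits, used here purely as an illustration and not taken from the patch:

  # Hypothetical usage: list the commit SHAs of coq/coq PR #12316.
  # Unauthenticated requests are rate-limited by GitHub, so real use
  # would likely need an Authorization header on the curl call.
  commits="$(curl_paginate_array "https://api.github.com/repos/coq/coq/pulls/12316/commits")"
  # jq runs its filter once per input value, so this works whether the
  # helper yields a single array or one array per fetched page.
  echo "${commits}" | jq -r '.[].sha'

Because the helper requests per_page=100, a follow-up request is only issued when a full page of 100 elements comes back, so the common single-page case costs exactly one API call and needs no header parsing at all.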