use libcurl in getdeps (#76)
Summary:
X-link: facebookincubator/fizz#76

X-link: facebook/proxygen#402

X-link: facebook/folly#1735

X-link: facebookarchive/bistro#60

X-link: facebook/watchman#1012

X-link: facebook/fbthrift#487

Pull Request resolved: #114

X-link: facebook/fb303#27

When using getdeps inside a container, Python's urllib cannot download from dewey lfs (see this post for details: https://fb.workplace.com/groups/systemd.and.friends/permalink/2747692278870647/).

This change lets getdeps use `libcurl` to fetch dependencies, so a getdeps build can work inside the container environment.
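The libcurl path is opt-in: it is gated on the `GETDEPS_USE_LIBCURL` environment variable (any value enables it), so a container build can select it with something like `GETDEPS_USE_LIBCURL=1 python3 build/fbcode_builder/getdeps.py build <project>` (an illustrative invocation; only the variable name comes from this change).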

Reviewed By: mackorone

Differential Revision: D34696330

fbshipit-source-id: 06cae87eef40dfa3cecacacee49234b6737d546f
dgrnbrg-meta authored and facebook-github-bot committed Mar 14, 2022
1 parent 1a0c102 commit 1fc77f0
Showing 1 changed file with 28 additions and 5 deletions.
build/fbcode_builder/getdeps/fetcher.py:

@@ -654,15 +654,14 @@ def get_src_dir(self):
 
 
 def download_url_to_file_with_progress(url: str, file_name) -> None:
-    print("Download %s -> %s ..." % (url, file_name))
+    print("Download with %s -> %s ..." % (url, file_name))
 
     class Progress(object):
         last_report = 0
 
-        def progress(self, count, block, total):
+        def write_update(self, total, amount):
             if total == -1:
                 total = "(Unknown)"
-            amount = count * block
 
             if sys.stdout.isatty():
                 sys.stdout.write("\r downloading %s of %s " % (amount, total))
@@ -675,10 +674,33 @@ def progress(self, count, block, total):
                 self.last_report = now
             sys.stdout.flush()
 
+        def progress_pycurl(self, total, amount, _uploadtotal, _uploadamount):
+            self.write_update(total, amount)
+
+        def progress_urllib(self, count, block, total):
+            amount = count * block
+            self.write_update(total, amount)
+
     progress = Progress()
     start = time.time()
     try:
-        (_filename, headers) = urlretrieve(url, file_name, reporthook=progress.progress)
+        if os.environ.get("GETDEPS_USE_LIBCURL") is not None:
+            import pycurl
+
+            with open(file_name, "wb") as f:
+                c = pycurl.Curl()
+                c.setopt(pycurl.URL, url)
+                c.setopt(pycurl.WRITEDATA, f)
+                # display progress
+                c.setopt(pycurl.NOPROGRESS, False)
+                c.setopt(pycurl.XFERINFOFUNCTION, progress.progress_pycurl)
+                c.perform()
+                c.close()
+            headers = None
+        else:
+            (_filename, headers) = urlretrieve(
+                url, file_name, reporthook=progress.progress_urllib
+            )
     except (OSError, IOError) as exc:  # noqa: B014
         raise TransientFailure(
             "Failed to download %s to %s: %s" % (url, file_name, str(exc))
@@ -687,7 +709,8 @@ def progress(self, count, block, total):
     end = time.time()
     sys.stdout.write(" [Complete in %f seconds]\n" % (end - start))
     sys.stdout.flush()
-    print(f"{headers}")
+    if headers is not None:
+        print(f"{headers}")
 
 
 class ArchiveFetcher(Fetcher):
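For readers who want to experiment with this pattern outside of getdeps, here is a minimal self-contained sketch of the same download strategy. It is an illustration, not the shipped code: the `download` helper name and the example URL are made up, but the environment-variable gate and the pycurl/urllib callback signatures follow the diff above.

import os
import sys
from urllib.request import urlretrieve


def _curl_progress(dltotal, dlnow, _ultotal, _ulnow):
    # libcurl's XFERINFOFUNCTION receives byte counts for both directions.
    # Returning None (treated as 0) tells libcurl to continue the transfer.
    sys.stdout.write("\r downloading %s of %s " % (dlnow, dltotal))
    sys.stdout.flush()


def _urllib_progress(count, block, total):
    # urllib's reporthook receives (block_count, block_size, total_size).
    sys.stdout.write("\r downloading %s of %s " % (count * block, total))
    sys.stdout.flush()


def download(url: str, file_name: str) -> None:
    if os.environ.get("GETDEPS_USE_LIBCURL") is not None:
        import pycurl  # imported lazily: only needed on the libcurl path

        with open(file_name, "wb") as f:
            c = pycurl.Curl()
            c.setopt(pycurl.URL, url)
            c.setopt(pycurl.WRITEDATA, f)       # stream the body into the file
            c.setopt(pycurl.NOPROGRESS, False)  # enable the progress callback
            c.setopt(pycurl.XFERINFOFUNCTION, _curl_progress)
            c.perform()
            c.close()
    else:
        urlretrieve(url, file_name, reporthook=_urllib_progress)
    sys.stdout.write("\n")


if __name__ == "__main__":
    download("https://example.com/archive.tar.gz", "archive.tar.gz")

Keeping `import pycurl` inside the gated branch mirrors the change itself: builds that never set `GETDEPS_USE_LIBCURL` do not pick up a hard dependency on pycurl.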
