From 34778ed818ed3fea1ebc4d8f80e82e19fd4beb74 Mon Sep 17 00:00:00 2001
From: Miguel Barao
Date: Tue, 20 Nov 2018 17:04:09 +0000
Subject: [PATCH] FileHandler now serves the complete file instead of chunks.

---
 serve.py | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/serve.py b/serve.py
index b868bfa..a23d599 100755
--- a/serve.py
+++ b/serve.py
@@ -182,7 +182,9 @@ class TopicHandler(BaseHandler):
 # Based on https://bhch.github.io/posts/2017/12/serving-large-files-with-tornado-safely-without-blocking/
 # ----------------------------------------------------------------------------
 class FileHandler(BaseHandler):
-    chunk_size = 4 * 1024 * 1024   # serve up to 4 MiB multiple times
+    SUPPORTED_METHODS = ['GET']
+
+    # chunk_size = 4 * 1024 * 1024   # serve up to 4 MiB multiple times
 
     @tornado.web.authenticated
     async def get(self, filename):
@@ -203,17 +205,20 @@ class FileHandler(BaseHandler):
         # divide the file into chunks and write one chunk at a time, so
         # that the write does not block the ioloop for very long.
         with f:
-            chunk = f.read(self.chunk_size)
-            while chunk:
-                try:
-                    self.write(chunk)       # write the chunk to response
-                    await self.flush()      # flush the current chunk to socket
-                except iostream.StreamClosedError:
-                    break                   # client closed the connection
-                finally:
-                    del chunk
-                    await asyncio.sleep(0)  # 1 nanosecond (hack)
-                    chunk = f.read(self.chunk_size)
+            self.write(f.read())
+            await self.flush()
+
+            # chunk = f.read(self.chunk_size)
+            # while chunk:
+            #     try:
+            #         self.write(chunk)       # write the chunk to response
+            #         await self.flush()      # flush the current chunk to socket
+            #     except iostream.StreamClosedError:
+            #         break                   # client closed the connection
+            #     finally:
+            #         del chunk
+            #         await asyncio.sleep(0)  # 1 nanosecond (hack)
+            #         chunk = f.read(self.chunk_size)
 
 
 # ----------------------------------------------------------------------------
-- 
libgit2 0.21.2
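
For context, the change reduces the handler to a single write/flush of the entire file. Below is a minimal, self-contained sketch of a Tornado handler along those lines; the route, the 'public' base directory, and the content-type handling are illustrative assumptions and are not taken from serve.py (which uses its own BaseHandler and authentication).

import mimetypes
import os

import tornado.ioloop
import tornado.web


class FileHandler(tornado.web.RequestHandler):
    SUPPORTED_METHODS = ['GET']

    async def get(self, filename):
        # Hypothetical base directory; serve.py resolves paths its own way.
        filepath = os.path.join('public', filename)
        if not os.path.isfile(filepath):
            raise tornado.web.HTTPError(404)

        content_type, _ = mimetypes.guess_type(filepath)
        if content_type:
            self.set_header('Content-Type', content_type)

        # Serve the whole file in one write: simpler than the chunked loop,
        # but the entire file is buffered in memory before being flushed.
        with open(filepath, 'rb') as f:
            self.write(f.read())
        await self.flush()


def make_app():
    return tornado.web.Application([(r'/file/(.*)', FileHandler)])


if __name__ == '__main__':
    make_app().listen(8888)
    tornado.ioloop.IOLoop.current().start()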