WSGI Content-Length and Transfer-Encoding #75

Merged 7 commits on Mar 12, 2023
.gitignore (2 additions, 1 deletion)
@@ -5,4 +5,5 @@ __pycache__
 *.o
 node_modules/
 yarn.lock
-.vscode
+.vscode
+/venv
bench/asgi_wsgi/falcon-wsgi.py (3 additions, 4 deletions)
@@ -8,11 +8,10 @@ def on_get(self, req, resp):
         resp.content_type = falcon.MEDIA_TEXT # Default is JSON, so override
         resp.text = "Hello, World!"
     def on_post(self, req, resp):
-        raw_data = req.stream.getvalue()
-        print("data", raw_data)
+        raw_data = req.stream.read()
         resp.status = falcon.HTTP_200 # This is the default status
         resp.content_type = falcon.MEDIA_TEXT # Default is JSON, so override
-        resp.text = raw_data
+        resp.text = 'Ok'



@@ -23,4 +22,4 @@ def on_post(self, req, resp):
 app.add_route("/", home)
 
 if __name__ == "__main__":
-    WSGI(app).listen(8000, lambda config: print(f"Listening on port http://localhost:{config.port} now\n")).run(workers=8)
+    WSGI(app).listen(8000, lambda config: print(f"Listening on port http://localhost:{config.port} now\n")).run(workers=1)
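For reference, the revised on_post handler now drains the request body with req.stream.read() and answers with a fixed 'Ok' instead of echoing the payload back, so the benchmark measures upload handling rather than response size. A minimal client sketch to exercise that path (not part of the PR; it assumes the Falcon benchmark above is running locally on port 8000 and that the body contents are arbitrary):

import urllib.request

# Arbitrary request body; the handler only reads it and replies 'Ok'.
data = b"x" * 1024
req = urllib.request.Request("http://localhost:8000/", data=data, method="POST")
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.read())  # expect: 200 b'Ok'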
bench/asgi_wsgi/raw-wsgi.py (38 additions, 4 deletions)
@@ -1,8 +1,42 @@
-from socketify import WSGI
+from io import BytesIO
 
+payload = None
+with open("xml.zip", "rb") as file:
+    payload = file.read()
+
+
+stream = BytesIO()
+stream.write(payload)
+
+chunk_size = 64 * 1024
+content_length = len(payload)
+
+def app_chunked(environ, start_response):
+    start_response('200 OK', [('Content-Type', 'application/zip'), ('Transfer-Encoding', 'chunked')])
+
+    sended = 0
+    while content_length > sended:
+        end = sended + chunk_size
+        yield payload[sended:end]
+        sended = end
+
+
 def app(environ, start_response):
-    start_response('200 OK', [('Content-Type', 'text/plain')])
-    yield b'Hello, World!\n'
+    start_response('200 OK', [('Content-Type', 'application/zip'), ('Content-Length', str(content_length))])
+
+    sended = 0
+    while content_length > sended:
+        end = sended + chunk_size
+        yield payload[sended:end]
+        sended = end
+
+def app_hello(environ, start_response):
+    start_response('200 OK', [('Content-Type', 'text/plain'), ('Content-Length', '13')])
+
+    yield b'Hello, World!'
 
 if __name__ == "__main__":
-    WSGI(app).listen(8000, lambda config: print(f"Listening on port http://localhost:{config.port} now\n")).run(8)
+    from socketify import WSGI
+    WSGI(app_hello).listen(8000, lambda config: print(f"Listening on port http://localhost:{config.port} now\n")).run(1)
+    # import fastwsgi
+    # fastwsgi.run(wsgi_app=app_hello, host='127.0.0.1', port=8000)
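The two new WSGI callables cover both framing modes named in the PR title: app_chunked advertises Transfer-Encoding: chunked and streams the zip payload in 64 KiB slices from a generator, while app declares an explicit Content-Length for the same generator so the server can emit a fixed-size response. A rough way to check which framing actually reaches the client (a sketch only; as committed the __main__ block serves app_hello, so this assumes app or app_chunked has been wired in and is listening on localhost:8000):

import http.client

conn = http.client.HTTPConnection("localhost", 8000)
conn.request("GET", "/")
resp = conn.getresponse()
# app() should produce a Content-Length header and no Transfer-Encoding;
# app_chunked() should produce Transfer-Encoding: chunked and no Content-Length.
print("Content-Length:", resp.getheader("Content-Length"))
print("Transfer-Encoding:", resp.getheader("Transfer-Encoding"))
print("Body bytes received:", len(resp.read()))
conn.close()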
src/socketify/helpers.py (1 addition, 1 deletion)
@@ -222,7 +222,7 @@ async def middleware_route(res, req, data=None):
 class DecoratorRouter:
     def __init__(self, app, prefix: str = "", *middlewares):
         self.app = app
-        self.middlewares = list(*middlewares)
+        self.middlewares = list(middlewares)
         self.prefix = prefix
 
     def get(self, path):
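The helpers.py change is a one-character bug fix. In __init__, middlewares is collected as a tuple by the *middlewares parameter; list(*middlewares) re-unpacks that tuple into positional arguments of list(), which raises a TypeError as soon as two or more middlewares are passed (and tries to iterate a single callable otherwise). list(middlewares) simply copies the tuple into a list. A standalone illustration with hypothetical middleware names, not socketify code:

def auth(route):
    return route

def log(route):
    return route

middlewares = (auth, log)  # what *middlewares collects in __init__
# list(*middlewares) means list(auth, log): TypeError, list takes at most one argument
# list(middlewares) means [auth, log]: the intended copy of the middleware chain
print(list(middlewares))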