parent
025ecb941c
commit
e562d5e7bb
@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env python
|
||||
from flask import Flask, redirect
|
||||
from flask_debugtoolbar import DebugToolbarExtension
|
||||
from flask_socketio import SocketIO
|
||||
import os
|
||||
|
||||
from get_args import args
|
||||
from config import base_url
|
||||
|
||||
socketio = SocketIO()
|
||||
|
||||
|
||||
def create_app():
    """Create and configure the Flask application.

    Routes are prefixed with the configured base URL, debug and debug
    toolbar settings follow the --dev flag, and Socket.IO is attached
    before the app is returned.
    """
    # Flask setup: templates and static assets live one level up.
    here = os.path.dirname(__file__)
    app = Flask(__name__,
                template_folder=os.path.join(here, '..', 'views'),
                static_folder=os.path.join(here, '..', 'static'))
    app.route = prefix_route(app.route, base_url.rstrip('/'))

    app.config["SECRET_KEY"] = 'test'

    if args.dev:
        # Development mode: full debugging plus the Flask debug toolbar.
        app.config["DEBUG"] = True
        app.config["DEBUG_TB_ENABLED"] = True
        app.config["DEBUG_TB_PROFILER_ENABLED"] = True
        app.config["DEBUG_TB_TEMPLATE_EDITOR_ENABLED"] = True
        app.config["DEBUG_TB_INTERCEPT_REDIRECTS"] = False
    else:
        # Production mode: debugging and the toolbar are disabled.
        app.config["DEBUG"] = False
        app.config["DEBUG_TB_ENABLED"] = False

    # The extension reads the DEBUG_TB_* config set above.
    DebugToolbarExtension(app)

    @app.errorhandler(404)
    def http_error_handler(error):
        # Unknown URLs redirect back to the application root.
        return redirect(base_url.rstrip('/')), 302

    socketio.init_app(app)
    return app
|
||||
|
||||
|
||||
def prefix_route(route_function, prefix='', mask='{0}{1}'):
    """Return a wrapper around *route_function* that prepends *prefix*.

    The *mask* format string is filled with, in that order: prefix, route.
    """
    def newroute(route, *args, **kwargs):
        # Delegate to the original route function with the prefixed rule.
        return route_function(mask.format(prefix, route), *args, **kwargs)

    return newroute
|
@ -0,0 +1,25 @@
|
||||
import sys
|
||||
|
||||
from .client import Client
|
||||
from .middleware import WSGIApp, Middleware
|
||||
from .server import Server
|
||||
if sys.version_info >= (3, 5):  # pragma: no cover
    from .asyncio_server import AsyncServer
    from .asyncio_client import AsyncClient
    from .async_drivers.asgi import ASGIApp
    try:
        from .async_drivers.tornado import get_tornado_handler
    except ImportError:
        # tornado is optional; its handler factory is simply unavailable.
        get_tornado_handler = None
else:  # pragma: no cover
    # Async support requires Python 3.5+.
    AsyncServer = None
    AsyncClient = None
    get_tornado_handler = None
    ASGIApp = None

__version__ = '3.11.2'

__all__ = ['__version__', 'Server', 'WSGIApp', 'Middleware', 'Client']
if AsyncServer is not None:  # pragma: no cover
    # BUG FIX: the original statement ended with a stray trailing comma,
    # which turned the right-hand side into a one-element tuple containing
    # the list, so the list object itself (not its items) was appended
    # to __all__.
    __all__ += ['AsyncServer', 'ASGIApp', 'get_tornado_handler',
                'AsyncClient']
|
@ -0,0 +1,128 @@
|
||||
import asyncio
|
||||
import sys
|
||||
from urllib.parse import urlsplit
|
||||
|
||||
from aiohttp.web import Response, WebSocketResponse
|
||||
import six
|
||||
|
||||
|
||||
def create_route(app, engineio_server, engineio_endpoint):
    """Register the engine.io endpoint as a route on the aiohttp app.

    GET, POST and OPTIONS requests must all be hooked up to the
    engine.io request handler.
    """
    handler = engineio_server.handle_request
    app.router.add_get(engineio_endpoint, handler)
    app.router.add_post(engineio_endpoint, handler)
    app.router.add_route('OPTIONS', engineio_endpoint, handler)
|
||||
|
||||
|
||||
def translate_request(request):
    """Build a WSGI-compatible environ dictionary from an aiohttp request.

    The raw HTTP message and payload are taken from the request's private
    attributes and mapped onto the conventional CGI/WSGI keys.
    """
    message = request._message
    payload = request._payload

    uri_parts = urlsplit(message.path)
    environ = {
        'wsgi.input': payload,
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        'SERVER_SOFTWARE': 'aiohttp',
        'REQUEST_METHOD': message.method,
        'QUERY_STRING': uri_parts.query or '',
        'RAW_URI': message.path,
        'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version,
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'aiohttp',
        'SERVER_PORT': '0',
        'aiohttp.request': request
    }

    for name, value in message.headers.items():
        name = name.upper()
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            key = 'HTTP_%s' % name.replace('-', '_')
            if key in environ:
                # Repeated headers fold into a comma-separated list.
                value = '%s,%s' % (environ[key], value)
            environ[key] = value

    # Honour a proxy-supplied scheme, defaulting to plain HTTP.
    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')

    environ['PATH_INFO'] = uri_parts.path
    environ['SCRIPT_NAME'] = ''

    return environ
|
||||
|
||||
|
||||
def make_response(status, headers, payload, environ):
    """Build an aiohttp Response for this async mode.

    *status* is a WSGI-style status line such as ``"200 OK"``; only the
    numeric code is used.
    """
    status_code = int(status.split()[0])
    return Response(body=payload, status=status_code, headers=headers)
|
||||
|
||||
|
||||
class WebSocket(object):  # pragma: no cover
    """
    This wrapper class provides an aiohttp WebSocket interface that is
    somewhat compatible with eventlet's implementation.
    """
    def __init__(self, handler):
        self.handler = handler
        self._sock = None

    async def __call__(self, environ):
        request = environ['aiohttp.request']
        self._sock = WebSocketResponse()
        await self._sock.prepare(request)

        self.environ = environ
        await self.handler(self)
        return self._sock

    async def close(self):
        await self._sock.close()

    async def send(self, message):
        # Binary payloads go through send_bytes, text through send_str.
        sender = self._sock.send_bytes if isinstance(message, bytes) \
            else self._sock.send_str
        if asyncio.iscoroutinefunction(sender):
            await sender(message)
        else:
            sender(message)

    async def wait(self):
        msg = await self._sock.receive()
        if not isinstance(msg.data, (six.binary_type, six.text_type)):
            # Anything other than text/binary means the socket closed.
            raise IOError()
        return msg.data


# Capability table consumed by the engine.io server for this async mode.
_async = {
    'asyncio': True,
    'create_route': create_route,
    'translate_request': translate_request,
    'make_response': make_response,
    'websocket': WebSocket,
}
|
@ -0,0 +1,214 @@
|
||||
import os
|
||||
import sys
|
||||
|
||||
from engineio.static_files import get_static_file
|
||||
|
||||
|
||||
class ASGIApp:
    """ASGI application middleware for Engine.IO.

    This middleware dispatches traffic to an Engine.IO application. It can
    also serve a list of static files to the client, or forward unrelated
    HTTP traffic to another ASGI application.

    :param engineio_server: The Engine.IO server. Must be an instance of
                            the ``engineio.AsyncServer`` class.
    :param static_files: A dictionary with static file mapping rules. See
                         the documentation for details on this argument.
    :param other_asgi_app: A separate ASGI app that receives all other
                           traffic.
    :param engineio_path: The endpoint where the Engine.IO application
                          should be installed. The default value is
                          appropriate for most cases.

    Example usage::

        import engineio
        import uvicorn

        eio = engineio.AsyncServer()
        app = engineio.ASGIApp(eio, static_files={
            '/': {'content_type': 'text/html', 'filename': 'index.html'},
            '/index.html': {'content_type': 'text/html',
                            'filename': 'index.html'},
        })
        uvicorn.run(app, '127.0.0.1', 5000)
    """
    def __init__(self, engineio_server, other_asgi_app=None,
                 static_files=None, engineio_path='engine.io'):
        self.engineio_server = engineio_server
        self.other_asgi_app = other_asgi_app
        self.engineio_path = engineio_path.strip('/')
        self.static_files = static_files or {}

    async def __call__(self, scope, receive, send):
        path = scope['path']
        if scope['type'] in ('http', 'websocket') and \
                path.startswith('/{0}/'.format(self.engineio_path)):
            # Engine.IO traffic goes straight to the server.
            await self.engineio_server.handle_request(scope, receive, send)
            return

        static_file = None
        if scope['type'] == 'http' and self.static_files:
            static_file = get_static_file(path, self.static_files)
        if static_file:
            await self.serve_static_file(static_file, receive, send)
        elif self.other_asgi_app is not None:
            await self.other_asgi_app(scope, receive, send)
        elif scope['type'] == 'lifespan':
            await self.lifespan(receive, send)
        else:
            await self.not_found(receive, send)

    async def serve_static_file(self, static_file, receive,
                                send):  # pragma: no cover
        event = await receive()
        if event['type'] != 'http.request':
            return
        if not os.path.exists(static_file['filename']):
            await self.not_found(receive, send)
            return
        with open(static_file['filename'], 'rb') as f:
            payload = f.read()
        content_type = static_file['content_type'].encode('utf-8')
        await send({'type': 'http.response.start',
                    'status': 200,
                    'headers': [(b'Content-Type', content_type)]})
        await send({'type': 'http.response.body',
                    'body': payload})

    async def lifespan(self, receive, send):
        # Acknowledge ASGI lifespan startup/shutdown events.
        event = await receive()
        if event['type'] == 'lifespan.startup':
            await send({'type': 'lifespan.startup.complete'})
        elif event['type'] == 'lifespan.shutdown':
            await send({'type': 'lifespan.shutdown.complete'})

    async def not_found(self, receive, send):
        """Return a 404 Not Found error to the client."""
        await send({'type': 'http.response.start',
                    'status': 404,
                    'headers': [(b'Content-Type', b'text/plain')]})
        await send({'type': 'http.response.body',
                    'body': b'Not Found'})
|
||||
|
||||
|
||||
async def translate_request(scope, receive, send):
    """Build a WSGI-compatible environ dictionary from an ASGI scope.

    For HTTP requests the entire body is drained from *receive* first;
    for websocket connections the handshake is accepted. Any other event
    type yields an empty dict.
    """
    class AwaitablePayload(object):  # pragma: no cover
        def __init__(self, payload):
            self.payload = payload or b''

        async def read(self, length=None):
            if length is None:
                data, self.payload = self.payload, b''
            else:
                data = self.payload[:length]
                self.payload = self.payload[length:]
            return data

    event = await receive()
    payload = b''
    if event['type'] == 'http.request':
        payload += event.get('body') or b''
        # Keep draining until the client signals the body is complete.
        while event.get('more_body'):
            event = await receive()
            if event['type'] == 'http.request':
                payload += event.get('body') or b''
    elif event['type'] == 'websocket.connect':
        await send({'type': 'websocket.accept'})
    else:
        return {}

    raw_uri = scope['path'].encode('utf-8')
    if scope.get('query_string'):
        raw_uri += b'?' + scope['query_string']
    environ = {
        'wsgi.input': AwaitablePayload(payload),
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        'SERVER_SOFTWARE': 'asgi',
        'REQUEST_METHOD': scope.get('method', 'GET'),
        'PATH_INFO': scope['path'],
        'QUERY_STRING': scope.get('query_string', b'').decode('utf-8'),
        'RAW_URI': raw_uri.decode('utf-8'),
        'SCRIPT_NAME': '',
        'SERVER_PROTOCOL': 'HTTP/1.1',
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'asgi',
        'SERVER_PORT': '0',
        'asgi.receive': receive,
        'asgi.send': send,
    }

    for raw_name, raw_value in scope['headers']:
        name = raw_name.upper().decode('utf-8')
        value = raw_value.decode('utf-8')
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            key = 'HTTP_%s' % name.replace('-', '_')
            if key in environ:
                # Repeated headers fold into a comma-separated list.
                value = '%s,%s' % (environ[key], value)
            environ[key] = value

    # Honour a proxy-supplied scheme, defaulting to plain HTTP.
    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
    return environ
|
||||
|
||||
|
||||
async def make_response(status, headers, payload, environ):
    """Send an HTTP response through the ASGI send callable.

    *status* is a WSGI-style status line; headers are encoded to bytes
    as required by the ASGI spec.
    """
    encoded_headers = [(name.encode('utf-8'), value.encode('utf-8'))
                       for name, value in headers]
    send = environ['asgi.send']
    await send({'type': 'http.response.start',
                'status': int(status.split(' ')[0]),
                'headers': encoded_headers})
    await send({'type': 'http.response.body',
                'body': payload})
|
||||
|
||||
|
||||
class WebSocket(object):  # pragma: no cover
    """
    This wrapper class provides an asgi WebSocket interface that is
    somewhat compatible with eventlet's implementation.
    """
    def __init__(self, handler):
        self.handler = handler
        self.asgi_receive = None
        self.asgi_send = None

    async def __call__(self, environ):
        self.asgi_receive = environ['asgi.receive']
        self.asgi_send = environ['asgi.send']
        await self.handler(self)

    async def close(self):
        await self.asgi_send({'type': 'websocket.close'})

    async def send(self, message):
        # Exactly one of bytes/text is populated, per the ASGI spec.
        is_binary = isinstance(message, bytes)
        await self.asgi_send({'type': 'websocket.send',
                              'bytes': message if is_binary else None,
                              'text': None if is_binary else message})

    async def wait(self):
        event = await self.asgi_receive()
        if event['type'] != 'websocket.receive':
            # Any other event type means the connection is gone.
            raise IOError()
        return event.get('bytes') or event.get('text')
|
||||
|
||||
|
||||
# Capability table consumed by the engine.io server to look up the
# primitives available in this (asgi) async mode.
_async = {
    'asyncio': True,
    'translate_request': translate_request,
    'make_response': make_response,
    'websocket': WebSocket,
}
|
@ -0,0 +1,30 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
from eventlet.green.threading import Thread, Event
|
||||
from eventlet import queue
|
||||
from eventlet import sleep
|
||||
from eventlet.websocket import WebSocketWSGI as _WebSocketWSGI
|
||||
|
||||
|
||||
class WebSocketWSGI(_WebSocketWSGI):
    """Thin wrapper over eventlet's WebSocketWSGI that records the raw
    socket of the current request before delegating to the parent."""

    def __init__(self, *args, **kwargs):
        super(WebSocketWSGI, self).__init__(*args, **kwargs)
        self._sock = None

    def __call__(self, environ, start_response):
        if 'eventlet.input' not in environ:
            raise RuntimeError('You need to use the eventlet server. '
                               'See the Deployment section of the '
                               'documentation for more information.')
        # Keep a reference to the underlying socket for later use.
        self._sock = environ['eventlet.input'].get_socket()
        return super(WebSocketWSGI, self).__call__(environ, start_response)


# Capability table for the eventlet async mode.
_async = {
    'thread': Thread,
    'queue': queue.Queue,
    'queue_empty': queue.Empty,
    'event': Event,
    'websocket': WebSocketWSGI,
    'sleep': sleep,
}
|
@ -0,0 +1,63 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import gevent
|
||||
from gevent import queue
|
||||
from gevent.event import Event
|
||||
try:
|
||||
import geventwebsocket # noqa
|
||||
_websocket_available = True
|
||||
except ImportError:
|
||||
_websocket_available = False
|
||||
|
||||
|
||||
class Thread(gevent.Greenlet):  # pragma: no cover
    """
    This wrapper class provides gevent Greenlet interface that is compatible
    with the standard library's Thread class.
    """
    def __init__(self, target, args=None, kwargs=None):
        # FIX: the original used mutable default arguments (args=[],
        # kwargs={}), which are shared across every call; use None
        # sentinels instead (backward-compatible).
        super(Thread, self).__init__(target, *(args or []), **(kwargs or {}))

    def _run(self):
        return self.run()


class WebSocketWSGI(object):  # pragma: no cover
    """
    This wrapper class provides a gevent WebSocket interface that is
    compatible with eventlet's implementation.
    """
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        if 'wsgi.websocket' not in environ:
            raise RuntimeError('You need to use the gevent-websocket server. '
                               'See the Deployment section of the '
                               'documentation for more information.')
        self._sock = environ['wsgi.websocket']
        self.environ = environ
        # Mirror attributes of the underlying socket for callers.
        self.version = self._sock.version
        self.path = self._sock.path
        self.origin = self._sock.origin
        self.protocol = self._sock.protocol
        return self.app(self)

    def close(self):
        return self._sock.close()

    def send(self, message):
        return self._sock.send(message)

    def wait(self):
        return self._sock.receive()


# Capability table for the gevent async mode; websocket support requires
# the optional gevent-websocket package.
_async = {
    'thread': Thread,
    'queue': queue.JoinableQueue,
    'queue_empty': queue.Empty,
    'event': Event,
    'websocket': WebSocketWSGI if _websocket_available else None,
    'sleep': gevent.sleep,
}
|
@ -0,0 +1,156 @@
|
||||
from __future__ import absolute_import
|
||||
|
||||
import six
|
||||
|
||||
import gevent
|
||||
from gevent import queue
|
||||
from gevent.event import Event
|
||||
import uwsgi
|
||||
_websocket_available = hasattr(uwsgi, 'websocket_handshake')
|
||||
|
||||
|
||||
class Thread(gevent.Greenlet):  # pragma: no cover
    """
    This wrapper class provides gevent Greenlet interface that is compatible
    with the standard library's Thread class.
    """
    def __init__(self, target, args=None, kwargs=None):
        # FIX: avoid mutable default arguments (args=[], kwargs={}), which
        # are shared across all calls; use None sentinels instead
        # (backward-compatible).
        super(Thread, self).__init__(target, *(args or []), **(kwargs or {}))

    def _run(self):
        return self.run()
|
||||
|
||||
|
||||
class uWSGIWebSocket(object):  # pragma: no cover
    """
    This wrapper class provides a uWSGI WebSocket interface that is
    compatible with eventlet's implementation.

    Two operating modes exist: with uWSGI >= 2.1.x a request context
    allows websocket API calls from any greenlet; on older versions a
    select-based greenlet plus an event/queue pair emulate that from the
    main greenlet only.
    """
    def __init__(self, app):
        self.app = app
        self._sock = None

    def __call__(self, environ, start_response):
        # Raw connection file descriptor, needed for select() below.
        self._sock = uwsgi.connection_fd()
        self.environ = environ

        uwsgi.websocket_handshake()

        self._req_ctx = None
        if hasattr(uwsgi, 'request_context'):
            # uWSGI >= 2.1.x with support for api access across-greenlets
            self._req_ctx = uwsgi.request_context()
        else:
            # use event and queue for sending messages
            from gevent.event import Event
            from gevent.queue import Queue
            from gevent.select import select
            self._event = Event()
            self._send_queue = Queue()

            # spawn a select greenlet
            def select_greenlet_runner(fd, event):
                """Sets event when data becomes available to read on fd."""
                while True:
                    event.set()
                    try:
                        select([fd], [], [])[0]
                    except ValueError:
                        # fd became invalid (connection closed); stop.
                        break
            self._select_greenlet = gevent.spawn(
                select_greenlet_runner,
                self._sock,
                self._event)

        # Hand control to the application; returns when the app is done
        # with the websocket.
        self.app(self)

    def close(self):
        """Disconnects uWSGI from the client."""
        uwsgi.disconnect()
        if self._req_ctx is None:
            # better kill it here in case wait() is not called again
            self._select_greenlet.kill()
            self._event.set()

    def _send(self, msg):
        """Transmits message either in binary or UTF-8 text mode,
        depending on its type."""
        if isinstance(msg, six.binary_type):
            method = uwsgi.websocket_send_binary
        else:
            method = uwsgi.websocket_send
        if self._req_ctx is not None:
            method(msg, request_context=self._req_ctx)
        else:
            method(msg)

    def _decode_received(self, msg):
        """Returns either bytes or str, depending on message type."""
        if not isinstance(msg, six.binary_type):
            # already decoded - do nothing
            return msg
        # only decode from utf-8 if message is not binary data
        # NOTE(review): relies on the engine.io packet encoding where the
        # first byte of a text packet is an ASCII digit or above.
        type = six.byte2int(msg[0:1])
        if type >= 48:  # no binary
            return msg.decode('utf-8')
        # binary message, don't try to decode
        return msg

    def send(self, msg):
        """Queues a message for sending. Real transmission is done in
        wait method.
        Sends directly if uWSGI version is new enough."""
        if self._req_ctx is not None:
            self._send(msg)
        else:
            self._send_queue.put(msg)
            self._event.set()

    def wait(self):
        """Waits and returns received messages.
        If running in compatibility mode for older uWSGI versions,
        it also sends messages that have been queued by send().
        A return value of None means that connection was closed.
        This must be called repeatedly. For uWSGI < 2.1.x it must
        be called from the main greenlet."""
        while True:
            if self._req_ctx is not None:
                try:
                    msg = uwsgi.websocket_recv(request_context=self._req_ctx)
                except IOError:  # connection closed
                    return None
                return self._decode_received(msg)
            else:
                # we wake up at least every 3 seconds to let uWSGI
                # do its ping/ponging
                event_set = self._event.wait(timeout=3)
                if event_set:
                    self._event.clear()
                    # maybe there is something to send
                    msgs = []
                    while True:
                        try:
                            msgs.append(self._send_queue.get(block=False))
                        except gevent.queue.Empty:
                            break
                    for msg in msgs:
                        self._send(msg)
                # maybe there is something to receive, if not, at least
                # ensure uWSGI does its ping/ponging
                try:
                    msg = uwsgi.websocket_recv_nb()
                except IOError:  # connection closed
                    self._select_greenlet.kill()
                    return None
                if msg:  # message available
                    return self._decode_received(msg)


# Capability table for the gevent+uWSGI async mode; websocket support
# requires uWSGI to have been built with its websocket API.
_async = {
    'thread': Thread,
    'queue': queue.JoinableQueue,
    'queue_empty': queue.Empty,
    'event': Event,
    'websocket': uWSGIWebSocket if _websocket_available else None,
    'sleep': gevent.sleep,
}
|
@ -0,0 +1,144 @@
|
||||
import sys
|
||||
from urllib.parse import urlsplit
|
||||
|
||||
from sanic.response import HTTPResponse
|
||||
try:
|
||||
from sanic.websocket import WebSocketProtocol
|
||||
except ImportError:
|
||||
# the installed version of sanic does not have websocket support
|
||||
WebSocketProtocol = None
|
||||
import six
|
||||
|
||||
|
||||
def create_route(app, engineio_server, engineio_endpoint):
    """Register the engine.io endpoint as a route on the Sanic app.

    GET, POST and OPTIONS requests must all reach the engine.io handler;
    websocket support is enabled when this Sanic version provides it.
    """
    app.add_route(engineio_server.handle_request, engineio_endpoint,
                  methods=['GET', 'POST', 'OPTIONS'])
    try:
        app.enable_websocket()
    except AttributeError:
        # This version of Sanic does not support websocket.
        pass
|
||||
|
||||
|
||||
def translate_request(request):
    """Build a WSGI-compatible environ dictionary from a Sanic request."""
    class AwaitablePayload(object):
        def __init__(self, payload):
            self.payload = payload or b''

        async def read(self, length=None):
            if length is None:
                data, self.payload = self.payload, b''
            else:
                data = self.payload[:length]
                self.payload = self.payload[length:]
            return data

    uri_parts = urlsplit(request.url)
    environ = {
        'wsgi.input': AwaitablePayload(request.body),
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        'SERVER_SOFTWARE': 'sanic',
        'REQUEST_METHOD': request.method,
        'QUERY_STRING': uri_parts.query or '',
        'RAW_URI': request.url,
        'SERVER_PROTOCOL': 'HTTP/' + request.version,
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'sanic',
        'SERVER_PORT': '0',
        'sanic.request': request
    }

    for name, value in request.headers.items():
        name = name.upper()
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            key = 'HTTP_%s' % name.replace('-', '_')
            if key in environ:
                # Repeated headers fold into a comma-separated list.
                value = '%s,%s' % (environ[key], value)
            environ[key] = value

    # Honour a proxy-supplied scheme, defaulting to plain HTTP.
    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')

    environ['PATH_INFO'] = uri_parts.path
    environ['SCRIPT_NAME'] = ''

    return environ
|
||||
|
||||
|
||||
def make_response(status, headers, payload, environ):
    """Build a Sanic HTTPResponse for this async mode.

    The content type is separated out of the header list because Sanic
    takes it as a dedicated constructor argument.
    """
    content_type = None
    headers_dict = {}
    for name, value in headers:
        if name.lower() == 'content-type':
            content_type = value
        else:
            headers_dict[name] = value
    return HTTPResponse(body_bytes=payload, content_type=content_type,
                        status=int(status.split()[0]), headers=headers_dict)
|
||||
|
||||
|
||||
class WebSocket(object):  # pragma: no cover
    """
    This wrapper class provides a sanic WebSocket interface that is
    somewhat compatible with eventlet's implementation.
    """
    def __init__(self, handler):
        self.handler = handler
        self._sock = None

    async def __call__(self, environ):
        request = environ['sanic.request']
        protocol = request.transport.get_protocol()
        self._sock = await protocol.websocket_handshake(request)

        self.environ = environ
        await self.handler(self)

    async def close(self):
        await self._sock.close()

    async def send(self, message):
        await self._sock.send(message)

    async def wait(self):
        data = await self._sock.recv()
        if not isinstance(data, (six.binary_type, six.text_type)):
            # Closed connection or unexpected frame type.
            raise IOError()
        return data


# Capability table for the sanic async mode; websocket support is only
# advertised when sanic's WebSocketProtocol could be imported.
_async = {
    'asyncio': True,
    'create_route': create_route,
    'translate_request': translate_request,
    'make_response': make_response,
    'websocket': WebSocket if WebSocketProtocol else None,
}
|
@ -0,0 +1,17 @@
|
||||
from __future__ import absolute_import
|
||||
import threading
|
||||
import time
|
||||
|
||||
try:
|
||||
import queue
|
||||
except ImportError: # pragma: no cover
|
||||
import Queue as queue
|
||||
|
||||
_async = {
|
||||
'thread': threading.Thread,
|
||||
'queue': queue.Queue,
|
||||
'queue_empty': queue.Empty,
|
||||
'event': threading.Event,
|
||||
'websocket': None,
|
||||
'sleep': time.sleep,
|
||||
}
|
@ -0,0 +1,184 @@
|
||||
import asyncio
|
||||
import sys
|
||||
from urllib.parse import urlsplit
|
||||
from .. import exceptions
|
||||
|
||||
import tornado.web
|
||||
import tornado.websocket
|
||||
import six
|
||||
|
||||
|
||||
def get_tornado_handler(engineio_server):
    """Build and return a tornado RequestHandler class bound to the given
    engine.io server.

    The returned class serves regular polling requests (GET/POST/OPTIONS)
    and upgrades GET requests with an ``Upgrade: websocket`` header to a
    websocket connection.
    """
    class Handler(tornado.websocket.WebSocketHandler):  # pragma: no cover
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Normalize cors_allowed_origins: '*' means allow all
            # (None sentinel); a single string becomes a one-item list.
            if isinstance(engineio_server.cors_allowed_origins,
                          six.string_types):
                if engineio_server.cors_allowed_origins == '*':
                    self.allowed_origins = None
                else:
                    self.allowed_origins = [
                        engineio_server.cors_allowed_origins]
            else:
                self.allowed_origins = engineio_server.cors_allowed_origins
            # Incoming websocket messages are buffered here until the
            # server asks for them via get_next_message().
            self.receive_queue = asyncio.Queue()

        async def get(self, *args, **kwargs):
            if self.request.headers.get('Upgrade', '').lower() == 'websocket':
                # Delegate the upgrade to tornado's websocket machinery.
                ret = super().get(*args, **kwargs)
                if asyncio.iscoroutine(ret):
                    await ret
            else:
                # Plain long-polling GET.
                await engineio_server.handle_request(self)

        async def open(self, *args, **kwargs):
            # this is the handler for the websocket request
            asyncio.ensure_future(engineio_server.handle_request(self))

        async def post(self, *args, **kwargs):
            await engineio_server.handle_request(self)

        async def options(self, *args, **kwargs):
            await engineio_server.handle_request(self)

        async def on_message(self, message):
            await self.receive_queue.put(message)

        async def get_next_message(self):
            return await self.receive_queue.get()

        def on_close(self):
            # None in the queue signals the reader that the socket closed.
            self.receive_queue.put_nowait(None)

        def check_origin(self, origin):
            if self.allowed_origins is None or origin in self.allowed_origins:
                return True
            return super().check_origin(origin)

        def get_compression_options(self):
            # enable compression
            return {}

    return Handler
|
||||
|
||||
|
||||
def translate_request(handler):
    """Build a WSGI-compatible environ dictionary from a tornado request
    handler.
    """
    class AwaitablePayload(object):
        def __init__(self, payload):
            self.payload = payload or b''

        async def read(self, length=None):
            if length is None:
                data, self.payload = self.payload, b''
            else:
                data = self.payload[:length]
                self.payload = self.payload[length:]
            return data

    payload = handler.request.body

    uri_parts = urlsplit(handler.request.path)
    full_uri = handler.request.path
    if handler.request.query:  # pragma: no cover
        full_uri += '?' + handler.request.query
    environ = {
        'wsgi.input': AwaitablePayload(payload),
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        # NOTE(review): 'aiohttp' here looks copy-pasted from the aiohttp
        # driver; value kept unchanged for behavior parity.
        'SERVER_SOFTWARE': 'aiohttp',
        'REQUEST_METHOD': handler.request.method,
        'QUERY_STRING': handler.request.query or '',
        'RAW_URI': full_uri,
        'SERVER_PROTOCOL': 'HTTP/%s' % handler.request.version,
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'aiohttp',
        'SERVER_PORT': '0',
        'tornado.handler': handler
    }

    for name, value in handler.request.headers.items():
        name = name.upper()
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            # NOTE: unlike the other drivers, repeated headers are not
            # folded here; the last value wins.
            environ['HTTP_%s' % name.replace('-', '_')] = value

    # Honour a proxy-supplied scheme, defaulting to plain HTTP.
    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')

    environ['PATH_INFO'] = uri_parts.path
    environ['SCRIPT_NAME'] = ''

    return environ
|
||||
|
||||
|
||||
def make_response(status, headers, payload, environ):
    """Write the response through the tornado request handler.

    For websocket connections tornado refuses a normal response (the 101
    status was already emitted), which surfaces as a RuntimeError from
    set_status; in that case nothing is written.
    """
    tornado_handler = environ['tornado.handler']
    try:
        tornado_handler.set_status(int(status.split()[0]))
    except RuntimeError:  # pragma: no cover
        # for websocket connections Tornado does not accept a response,
        # since it already emitted the 101 status code
        return
    for name, value in headers:
        tornado_handler.set_header(name, value)
    tornado_handler.write(payload)
    tornado_handler.finish()
|
||||
|
||||
|
||||
class WebSocket(object):  # pragma: no cover
    """Tornado WebSocket adapter.

    Wraps a tornado websocket handler behind an interface that is
    somewhat compatible with eventlet's websocket implementation.
    """
    def __init__(self, handler):
        # the application-level coroutine invoked once per connection
        self.handler = handler
        self.tornado_handler = None

    async def __call__(self, environ):
        self.tornado_handler = environ['tornado.handler']
        self.environ = environ
        await self.handler(self)

    async def close(self):
        self.tornado_handler.close()

    async def send(self, message):
        # bytes payloads go out as binary frames, everything else as text
        is_binary = isinstance(message, bytes)
        try:
            self.tornado_handler.write_message(message, binary=is_binary)
        except tornado.websocket.WebSocketClosedError:
            raise exceptions.EngineIOError()

    async def wait(self):
        msg = await self.tornado_handler.get_next_message()
        if isinstance(msg, (six.binary_type, six.text_type)):
            return msg
        raise IOError()
|
||||
|
||||
|
||||
# Capability flags and factory callables exported to the engineio server
# core for the tornado async mode.
_async = {
    'asyncio': True,
    'translate_request': translate_request,
    'make_response': make_response,
    'websocket': WebSocket,
}
|
@ -0,0 +1,585 @@
|
||||
import asyncio
|
||||
import ssl
|
||||
|
||||
try:
|
||||
import aiohttp
|
||||
except ImportError: # pragma: no cover
|
||||
aiohttp = None
|
||||
import six
|
||||
|
||||
from . import client
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import payload
|
||||
|
||||
|
||||
class AsyncClient(client.Client):
    """An Engine.IO client for asyncio.

    This class implements a fully compliant Engine.IO web client with support
    for websocket and long-polling transports, compatible with the asyncio
    framework on Python 3.5 or newer.

    :param logger: To enable logging set to ``True`` or pass a logger object to
                   use. To disable logging set to ``False``. The default is
                   ``False``.
    :param json: An alternative json module to use for encoding and decoding
                 packets. Custom json modules must have ``dumps`` and ``loads``
                 functions that are compatible with the standard library
                 versions.
    :param request_timeout: A timeout in seconds for requests. The default is
                            5 seconds.
    :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
                       skip SSL certificate verification, allowing
                       connections to servers with self signed certificates.
                       The default is ``True``.
    """
    def is_asyncio_based(self):
        return True

    async def connect(self, url, headers=None, transports=None,
                      engineio_path='engine.io'):
        """Connect to an Engine.IO server.

        :param url: The URL of the Engine.IO server. It can include custom
                    query string parameters if required by the server.
        :param headers: A dictionary with custom headers to send with the
                        connection request.
        :param transports: The list of allowed transports. Valid transports
                           are ``'polling'`` and ``'websocket'``. If not
                           given, the polling transport is connected first,
                           then an upgrade to websocket is attempted.
        :param engineio_path: The endpoint where the Engine.IO server is
                              installed. The default value is appropriate for
                              most cases.

        Note: this method is a coroutine.

        Example usage::

            eio = engineio.Client()
            await eio.connect('http://localhost:5000')
        """
        # avoid a mutable default argument; a missing value means "no
        # custom headers"
        headers = headers or {}
        if self.state != 'disconnected':
            raise ValueError('Client is not in a disconnected state')
        valid_transports = ['polling', 'websocket']
        if transports is not None:
            if isinstance(transports, six.text_type):
                transports = [transports]
            transports = [transport for transport in transports
                          if transport in valid_transports]
            if not transports:
                raise ValueError('No valid transports provided')
        self.transports = transports or valid_transports
        self.queue = self.create_queue()
        return await getattr(self, '_connect_' + self.transports[0])(
            url, headers, engineio_path)

    async def wait(self):
        """Wait until the connection with the server ends.

        Client applications can use this function to block the main thread
        during the life of the connection.

        Note: this method is a coroutine.
        """
        if self.read_loop_task:
            await self.read_loop_task

    async def send(self, data, binary=None):
        """Send a message to a client.

        :param data: The data to send to the client. Data can be of type
                     ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
                     or ``dict``, the data will be serialized as JSON.
        :param binary: ``True`` to send packet as binary, ``False`` to send
                       as text. If not given, unicode (Python 2) and str
                       (Python 3) are sent as text, and str (Python 2) and
                       bytes (Python 3) are sent as binary.

        Note: this method is a coroutine.
        """
        await self._send_packet(packet.Packet(packet.MESSAGE, data=data,
                                              binary=binary))

    async def disconnect(self, abort=False):
        """Disconnect from the server.

        :param abort: If set to ``True``, do not wait for background tasks
                      associated with the connection to end.

        Note: this method is a coroutine.
        """
        if self.state == 'connected':
            await self._send_packet(packet.Packet(packet.CLOSE))
            await self.queue.put(None)
            self.state = 'disconnecting'
            await self._trigger_event('disconnect', run_async=False)
            if self.current_transport == 'websocket':
                await self.ws.close()
            if not abort:
                await self.read_loop_task
            self.state = 'disconnected'
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
        self._reset()

    def start_background_task(self, target, *args, **kwargs):
        """Start a background task.

        This is a utility function that applications can use to start a
        background task.

        :param target: the target function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.

        This function returns an object compatible with the `Thread` class in
        the Python standard library. The `start()` method on this object is
        already called by this function.

        Note: this method is a coroutine.
        """
        return asyncio.ensure_future(target(*args, **kwargs))

    async def sleep(self, seconds=0):
        """Sleep for the requested amount of time.

        Note: this method is a coroutine.
        """
        return await asyncio.sleep(seconds)

    def create_queue(self):
        """Create a queue object."""
        q = asyncio.Queue()
        # expose an Empty attribute so queue users can catch the same
        # exception name regardless of async mode
        q.Empty = asyncio.QueueEmpty
        return q

    def create_event(self):
        """Create an event object."""
        return asyncio.Event()

    def _reset(self):
        if self.http:  # pragma: no cover
            asyncio.ensure_future(self.http.close())
        super()._reset()

    async def _connect_polling(self, url, headers, engineio_path):
        """Establish a long-polling connection to the Engine.IO server."""
        if aiohttp is None:  # pragma: no cover
            self.logger.error('aiohttp not installed -- cannot make HTTP '
                              'requests!')
            return
        self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
        self.logger.info('Attempting polling connection to ' + self.base_url)
        r = await self._send_request(
            'GET', self.base_url + self._get_url_timestamp(), headers=headers,
            timeout=self.request_timeout)
        if r is None:
            self._reset()
            raise exceptions.ConnectionError(
                'Connection refused by the server')
        if r.status < 200 or r.status >= 300:
            raise exceptions.ConnectionError(
                'Unexpected status code {} in server response'.format(
                    r.status))
        try:
            p = payload.Payload(encoded_payload=await r.read())
        except ValueError:
            six.raise_from(exceptions.ConnectionError(
                'Unexpected response from server'), None)
        open_packet = p.packets[0]
        if open_packet.packet_type != packet.OPEN:
            raise exceptions.ConnectionError(
                'OPEN packet not returned by server')
        self.logger.info(
            'Polling connection accepted with ' + str(open_packet.data))
        self.sid = open_packet.data['sid']
        self.upgrades = open_packet.data['upgrades']
        self.ping_interval = open_packet.data['pingInterval'] / 1000.0
        self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
        self.current_transport = 'polling'
        self.base_url += '&sid=' + self.sid

        self.state = 'connected'
        client.connected_clients.append(self)
        await self._trigger_event('connect', run_async=False)

        for pkt in p.packets[1:]:
            await self._receive_packet(pkt)

        if 'websocket' in self.upgrades and 'websocket' in self.transports:
            # attempt to upgrade to websocket
            if await self._connect_websocket(url, headers, engineio_path):
                # upgrade to websocket succeeded, we're done here
                return

        self.ping_loop_task = self.start_background_task(self._ping_loop)
        self.write_loop_task = self.start_background_task(self._write_loop)
        self.read_loop_task = self.start_background_task(
            self._read_loop_polling)

    async def _connect_websocket(self, url, headers, engineio_path):
        """Establish or upgrade to a WebSocket connection with the server."""
        if aiohttp is None:  # pragma: no cover
            self.logger.error('aiohttp package not installed')
            return False
        websocket_url = self._get_engineio_url(url, engineio_path,
                                               'websocket')
        if self.sid:
            self.logger.info(
                'Attempting WebSocket upgrade to ' + websocket_url)
            upgrade = True
            websocket_url += '&sid=' + self.sid
        else:
            upgrade = False
            self.base_url = websocket_url
            self.logger.info(
                'Attempting WebSocket connection to ' + websocket_url)

        if self.http is None or self.http.closed:  # pragma: no cover
            self.http = aiohttp.ClientSession()

        try:
            if not self.ssl_verify:
                ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                ws = await self.http.ws_connect(
                    websocket_url + self._get_url_timestamp(),
                    headers=headers, ssl=ssl_context)
            else:
                ws = await self.http.ws_connect(
                    websocket_url + self._get_url_timestamp(),
                    headers=headers)
        except (aiohttp.client_exceptions.WSServerHandshakeError,
                aiohttp.client_exceptions.ServerConnectionError):
            if upgrade:
                self.logger.warning(
                    'WebSocket upgrade failed: connection error')
                return False
            else:
                raise exceptions.ConnectionError('Connection error')
        if upgrade:
            # probe the new transport with a PING/PONG exchange before
            # committing to it, per the Engine.IO upgrade protocol
            p = packet.Packet(packet.PING, data='probe').encode(
                always_bytes=False)
            try:
                await ws.send_str(p)
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected send exception: %s',
                    str(e))
                return False
            try:
                p = (await ws.receive()).data
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected recv exception: %s',
                    str(e))
                return False
            pkt = packet.Packet(encoded_packet=p)
            if pkt.packet_type != packet.PONG or pkt.data != 'probe':
                self.logger.warning(
                    'WebSocket upgrade failed: no PONG packet')
                return False
            p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
            try:
                await ws.send_str(p)
            except Exception as e:  # pragma: no cover
                self.logger.warning(
                    'WebSocket upgrade failed: unexpected send exception: %s',
                    str(e))
                return False
            self.current_transport = 'websocket'
            self.logger.info('WebSocket upgrade was successful')
        else:
            try:
                p = (await ws.receive()).data
            except Exception as e:  # pragma: no cover
                raise exceptions.ConnectionError(
                    'Unexpected recv exception: ' + str(e))
            open_packet = packet.Packet(encoded_packet=p)
            if open_packet.packet_type != packet.OPEN:
                raise exceptions.ConnectionError('no OPEN packet')
            self.logger.info(
                'WebSocket connection accepted with ' + str(open_packet.data))
            self.sid = open_packet.data['sid']
            self.upgrades = open_packet.data['upgrades']
            self.ping_interval = open_packet.data['pingInterval'] / 1000.0
            self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
            self.current_transport = 'websocket'

            self.state = 'connected'
            client.connected_clients.append(self)
            await self._trigger_event('connect', run_async=False)

        self.ws = ws
        self.ping_loop_task = self.start_background_task(self._ping_loop)
        self.write_loop_task = self.start_background_task(self._write_loop)
        self.read_loop_task = self.start_background_task(
            self._read_loop_websocket)
        return True

    async def _receive_packet(self, pkt):
        """Handle incoming packets from the server."""
        packet_name = packet.packet_names[pkt.packet_type] \
            if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
        self.logger.info(
            'Received packet %s data %s', packet_name,
            pkt.data if not isinstance(pkt.data, bytes) else '<binary>')
        if pkt.packet_type == packet.MESSAGE:
            await self._trigger_event('message', pkt.data, run_async=True)
        elif pkt.packet_type == packet.PONG:
            self.pong_received = True
        elif pkt.packet_type == packet.CLOSE:
            await self.disconnect(abort=True)
        elif pkt.packet_type == packet.NOOP:
            pass
        else:
            self.logger.error('Received unexpected packet of type %s',
                              pkt.packet_type)

    async def _send_packet(self, pkt):
        """Queue a packet to be sent to the server."""
        if self.state != 'connected':
            return
        await self.queue.put(pkt)
        self.logger.info(
            'Sending packet %s data %s',
            packet.packet_names[pkt.packet_type],
            pkt.data if not isinstance(pkt.data, bytes) else '<binary>')

    async def _send_request(
            self, method, url, headers=None, body=None,
            timeout=None):  # pragma: no cover
        # returns None on connection failure; callers treat None as a
        # refused connection
        if self.http is None or self.http.closed:
            self.http = aiohttp.ClientSession()
        http_method = getattr(self.http, method.lower())

        try:
            if not self.ssl_verify:
                return await http_method(
                    url, headers=headers, data=body,
                    timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
            else:
                return await http_method(
                    url, headers=headers, data=body,
                    timeout=aiohttp.ClientTimeout(total=timeout))

        except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
            self.logger.info('HTTP %s request to %s failed with error %s.',
                             method, url, exc)

    async def _trigger_event(self, event, *args, **kwargs):
        """Invoke an event handler."""
        run_async = kwargs.pop('run_async', False)
        ret = None
        if event in self.handlers:
            if asyncio.iscoroutinefunction(self.handlers[event]) is True:
                if run_async:
                    return self.start_background_task(self.handlers[event],
                                                      *args)
                else:
                    try:
                        ret = await self.handlers[event](*args)
                    except asyncio.CancelledError:  # pragma: no cover
                        pass
                    # narrowed from a bare except: do not swallow
                    # BaseException such as KeyboardInterrupt
                    except Exception:
                        self.logger.exception(event + ' async handler error')
                        if event == 'connect':
                            # if connect handler raised error we reject the
                            # connection
                            return False
            else:
                if run_async:
                    async def async_handler():
                        return self.handlers[event](*args)

                    return self.start_background_task(async_handler)
                else:
                    try:
                        ret = self.handlers[event](*args)
                    # narrowed from a bare except: see note above
                    except Exception:
                        self.logger.exception(event + ' handler error')
                        if event == 'connect':
                            # if connect handler raised error we reject the
                            # connection
                            return False
        return ret

    async def _ping_loop(self):
        """This background task sends a PING to the server at the requested
        interval.
        """
        self.pong_received = True
        if self.ping_loop_event is None:
            self.ping_loop_event = self.create_event()
        else:
            self.ping_loop_event.clear()
        while self.state == 'connected':
            if not self.pong_received:
                self.logger.info(
                    'PONG response has not been received, aborting')
                if self.ws:
                    await self.ws.close()
                await self.queue.put(None)
                break
            self.pong_received = False
            await self._send_packet(packet.Packet(packet.PING))
            try:
                await asyncio.wait_for(self.ping_loop_event.wait(),
                                       self.ping_interval)
            except (asyncio.TimeoutError,
                    asyncio.CancelledError):  # pragma: no cover
                pass
        self.logger.info('Exiting ping task')

    async def _read_loop_polling(self):
        """Read packets by polling the Engine.IO server."""
        while self.state == 'connected':
            self.logger.info(
                'Sending polling GET request to ' + self.base_url)
            r = await self._send_request(
                'GET', self.base_url + self._get_url_timestamp(),
                timeout=max(self.ping_interval, self.ping_timeout) + 5)
            if r is None:
                self.logger.warning(
                    'Connection refused by the server, aborting')
                await self.queue.put(None)
                break
            if r.status < 200 or r.status >= 300:
                self.logger.warning('Unexpected status code %s in server '
                                    'response, aborting', r.status)
                await self.queue.put(None)
                break
            try:
                p = payload.Payload(encoded_payload=await r.read())
            except ValueError:
                self.logger.warning(
                    'Unexpected packet from server, aborting')
                await self.queue.put(None)
                break
            for pkt in p.packets:
                await self._receive_packet(pkt)

        self.logger.info('Waiting for write loop task to end')
        await self.write_loop_task
        self.logger.info('Waiting for ping loop task to end')
        if self.ping_loop_event:  # pragma: no cover
            self.ping_loop_event.set()
        await self.ping_loop_task
        if self.state == 'connected':
            await self._trigger_event('disconnect', run_async=False)
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
            self._reset()
        self.logger.info('Exiting read loop task')

    async def _read_loop_websocket(self):
        """Read packets from the Engine.IO WebSocket connection."""
        while self.state == 'connected':
            p = None
            try:
                p = (await self.ws.receive()).data
                if p is None:  # pragma: no cover
                    raise RuntimeError('WebSocket read returned None')
            except aiohttp.client_exceptions.ServerDisconnectedError:
                self.logger.info(
                    'Read loop: WebSocket connection was closed, aborting')
                await self.queue.put(None)
                break
            except Exception as e:
                self.logger.info(
                    'Unexpected error "%s", aborting', str(e))
                await self.queue.put(None)
                break
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            await self._receive_packet(pkt)

        self.logger.info('Waiting for write loop task to end')
        await self.write_loop_task
        self.logger.info('Waiting for ping loop task to end')
        if self.ping_loop_event:  # pragma: no cover
            self.ping_loop_event.set()
        await self.ping_loop_task
        if self.state == 'connected':
            await self._trigger_event('disconnect', run_async=False)
            try:
                client.connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
            self._reset()
        self.logger.info('Exiting read loop task')

    async def _write_loop(self):
        """This background task sends packages to the server as they are
        pushed to the send queue.
        """
        while self.state == 'connected':
            # to simplify the timeout handling, use the maximum of the
            # ping interval and ping timeout as timeout, with an extra 5
            # seconds grace period
            timeout = max(self.ping_interval, self.ping_timeout) + 5
            packets = None
            try:
                packets = [await asyncio.wait_for(self.queue.get(), timeout)]
            except (self.queue.Empty, asyncio.TimeoutError,
                    asyncio.CancelledError):
                self.logger.error('packet queue is empty, aborting')
                break
            if packets == [None]:
                self.queue.task_done()
                packets = []
            else:
                while True:
                    try:
                        packets.append(self.queue.get_nowait())
                    except self.queue.Empty:
                        break
                    if packets[-1] is None:
                        packets = packets[:-1]
                        self.queue.task_done()
                        break
            if not packets:
                # empty packet list returned -> connection closed
                break
            if self.current_transport == 'polling':
                p = payload.Payload(packets=packets)
                r = await self._send_request(
                    'POST', self.base_url, body=p.encode(),
                    headers={'Content-Type': 'application/octet-stream'},
                    timeout=self.request_timeout)
                for pkt in packets:
                    self.queue.task_done()
                if r is None:
                    self.logger.warning(
                        'Connection refused by the server, aborting')
                    break
                if r.status < 200 or r.status >= 300:
                    self.logger.warning('Unexpected status code %s in server '
                                        'response, aborting', r.status)
                    self._reset()
                    break
            else:
                # websocket
                try:
                    for pkt in packets:
                        if pkt.binary:
                            await self.ws.send_bytes(pkt.encode(
                                always_bytes=False))
                        else:
                            await self.ws.send_str(pkt.encode(
                                always_bytes=False))
                        self.queue.task_done()
                except aiohttp.client_exceptions.ServerDisconnectedError:
                    self.logger.info(
                        'Write loop: WebSocket connection was closed, '
                        'aborting')
                    break
        self.logger.info('Exiting write loop task')
|
@ -0,0 +1,472 @@
|
||||
import asyncio
|
||||
|
||||
import six
|
||||
from six.moves import urllib
|
||||
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import server
|
||||
from . import asyncio_socket
|
||||
|
||||
|
||||
class AsyncServer(server.Server):
|
||||
"""An Engine.IO server for asyncio.
|
||||
|
||||
This class implements a fully compliant Engine.IO web server with support
|
||||
for websocket and long-polling transports, compatible with the asyncio
|
||||
framework on Python 3.5 or newer.
|
||||
|
||||
:param async_mode: The asynchronous model to use. See the Deployment
|
||||
section in the documentation for a description of the
|
||||
available options. Valid async modes are "aiohttp",
|
||||
"sanic", "tornado" and "asgi". If this argument is not
|
||||
given, "aiohttp" is tried first, followed by "sanic",
|
||||
"tornado", and finally "asgi". The first async mode that
|
||||
has all its dependencies installed is the one that is
|
||||
chosen.
|
||||
:param ping_timeout: The time in seconds that the client waits for the
|
||||
server to respond before disconnecting.
|
||||
:param ping_interval: The interval in seconds at which the client pings
|
||||
the server. The default is 25 seconds. For advanced
|
||||
control, a two element tuple can be given, where
|
||||
the first number is the ping interval and the second
|
||||
is a grace period added by the server. The default
|
||||
grace period is 5 seconds.
|
||||
:param max_http_buffer_size: The maximum size of a message when using the
|
||||
polling transport.
|
||||
:param allow_upgrades: Whether to allow transport upgrades or not.
|
||||
:param http_compression: Whether to compress packages when using the
|
||||
polling transport.
|
||||
:param compression_threshold: Only compress messages when their byte size
|
||||
is greater than this value.
|
||||
:param cookie: Name of the HTTP cookie that contains the client session
|
||||
id. If set to ``None``, a cookie is not sent to the client.
|
||||
:param cors_allowed_origins: Origin or list of origins that are allowed to
|
||||
connect to this server. Only the same origin
|
||||
is allowed by default. Set this argument to
|
||||
``'*'`` to allow all origins, or to ``[]`` to
|
||||
disable CORS handling.
|
||||
:param cors_credentials: Whether credentials (cookies, authentication) are
|
||||
allowed in requests to this server.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
:param async_handlers: If set to ``True``, run message event handlers in
|
||||
non-blocking threads. To run handlers synchronously,
|
||||
set to ``False``. The default is ``True``.
|
||||
:param kwargs: Reserved for future extensions, any additional parameters
|
||||
given as keyword arguments will be silently ignored.
|
||||
"""
|
||||
    def is_asyncio_based(self):
        """Return ``True``: this server implementation is asyncio-based."""
        return True
|
||||
|
||||
    def async_modes(self):
        """Return the supported async modes, in the order they are tried
        when no explicit ``async_mode`` is given."""
        return ['aiohttp', 'sanic', 'tornado', 'asgi']
|
||||
|
||||
def attach(self, app, engineio_path='engine.io'):
|
||||
"""Attach the Engine.IO server to an application."""
|
||||
engineio_path = engineio_path.strip('/')
|
||||
self._async['create_route'](app, self, '/{}/'.format(engineio_path))
|
||||
|
||||
async def send(self, sid, data, binary=None):
|
||||
"""Send a message to a client.
|
||||
|
||||
:param sid: The session id of the recipient client.
|
||||
:param data: The data to send to the client. Data can be of type
|
||||
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
|
||||
or ``dict``, the data will be serialized as JSON.
|
||||
:param binary: ``True`` to send packet as binary, ``False`` to send
|
||||
as text. If not given, unicode (Python 2) and str
|
||||
(Python 3) are sent as text, and str (Python 2) and
|
||||
bytes (Python 3) are sent as binary.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
try:
|
||||
socket = self._get_socket(sid)
|
||||
except KeyError:
|
||||
# the socket is not available
|
||||
self.logger.warning('Cannot send to sid %s', sid)
|
||||
return
|
||||
await socket.send(packet.Packet(packet.MESSAGE, data=data,
|
||||
binary=binary))
|
||||
|
||||
async def get_session(self, sid):
|
||||
"""Return the user session for a client.
|
||||
|
||||
:param sid: The session id of the client.
|
||||
|
||||
The return value is a dictionary. Modifications made to this
|
||||
dictionary are not guaranteed to be preserved. If you want to modify
|
||||
the user session, use the ``session`` context manager instead.
|
||||
"""
|
||||
socket = self._get_socket(sid)
|
||||
return socket.session
|
||||
|
||||
async def save_session(self, sid, session):
|
||||
"""Store the user session for a client.
|
||||
|
||||
:param sid: The session id of the client.
|
||||
:param session: The session dictionary.
|
||||
"""
|
||||
socket = self._get_socket(sid)
|
||||
socket.session = session
|
||||
|
||||
def session(self, sid):
|
||||
"""Return the user session for a client with context manager syntax.
|
||||
|
||||
:param sid: The session id of the client.
|
||||
|
||||
This is a context manager that returns the user session dictionary for
|
||||
the client. Any changes that are made to this dictionary inside the
|
||||
context manager block are saved back to the session. Example usage::
|
||||
|
||||
@eio.on('connect')
|
||||
def on_connect(sid, environ):
|
||||
username = authenticate_user(environ)
|
||||
if not username:
|
||||
return False
|
||||
with eio.session(sid) as session:
|
||||
session['username'] = username
|
||||
|
||||
@eio.on('message')
|
||||
def on_message(sid, msg):
|
||||
async with eio.session(sid) as session:
|
||||
print('received message from ', session['username'])
|
||||
"""
|
||||
class _session_context_manager(object):
|
||||
def __init__(self, server, sid):
|
||||
self.server = server
|
||||
self.sid = sid
|
||||
self.session = None
|
||||
|
||||
async def __aenter__(self):
|
||||
self.session = await self.server.get_session(sid)
|
||||
return self.session
|
||||
|
||||
async def __aexit__(self, *args):
|
||||
await self.server.save_session(sid, self.session)
|
||||
|
||||
return _session_context_manager(self, sid)
|
||||
|
||||
async def disconnect(self, sid=None):
|
||||
"""Disconnect a client.
|
||||
|
||||
:param sid: The session id of the client to close. If this parameter
|
||||
is not given, then all clients are closed.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
if sid is not None:
|
||||
try:
|
||||
socket = self._get_socket(sid)
|
||||
except KeyError: # pragma: no cover
|
||||
# the socket was already closed or gone
|
||||
pass
|
||||
else:
|
||||
await socket.close()
|
||||
if sid in self.sockets: # pragma: no cover
|
||||
del self.sockets[sid]
|
||||
else:
|
||||
await asyncio.wait([client.close()
|
||||
for client in six.itervalues(self.sockets)])
|
||||
self.sockets = {}
|
||||
|
||||
    async def handle_request(self, *args, **kwargs):
        """Handle an HTTP request from the client.

        This is the entry point of the Engine.IO application. This function
        returns the HTTP response to deliver to the client.

        Note: this method is a coroutine.
        """
        # The framework-specific adapter converts the incoming request into a
        # WSGI-style environ dict; the adapter may or may not be a coroutine.
        translate_request = self._async['translate_request']
        if asyncio.iscoroutinefunction(translate_request):
            environ = await translate_request(*args, **kwargs)
        else:
            environ = translate_request(*args, **kwargs)

        if self.cors_allowed_origins != []:
            # Validate the origin header if present
            # This is important for WebSocket more than for HTTP, since
            # browsers only apply CORS controls to HTTP.
            origin = environ.get('HTTP_ORIGIN')
            if origin:
                allowed_origins = self._cors_allowed_origins(environ)
                if allowed_origins is not None and origin not in \
                        allowed_origins:
                    # Reject the request outright with a 400-style response.
                    self.logger.info(origin + ' is not an accepted origin.')
                    r = self._bad_request()
                    make_response = self._async['make_response']
                    if asyncio.iscoroutinefunction(make_response):
                        response = await make_response(
                            r['status'], r['headers'], r['response'], environ)
                    else:
                        response = make_response(r['status'], r['headers'],
                                                 r['response'], environ)
                    return response

        method = environ['REQUEST_METHOD']
        query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))

        # ``sid`` identifies an existing session; absent on the initial
        # connection request.
        sid = query['sid'][0] if 'sid' in query else None
        b64 = False
        jsonp = False
        jsonp_index = None

        if 'b64' in query:
            if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
                b64 = True
        if 'j' in query:
            jsonp = True
            try:
                jsonp_index = int(query['j'][0])
            except (ValueError, KeyError, IndexError):
                # Invalid JSONP index number
                pass

        if jsonp and jsonp_index is None:
            # JSONP requested but no usable callback index was supplied.
            self.logger.warning('Invalid JSONP index number')
            r = self._bad_request()
        elif method == 'GET':
            if sid is None:
                # No session yet: this is a new connection request.
                transport = query.get('transport', ['polling'])[0]
                if transport != 'polling' and transport != 'websocket':
                    self.logger.warning('Invalid transport %s', transport)
                    r = self._bad_request()
                else:
                    r = await self._handle_connect(environ, transport,
                                                   b64, jsonp_index)
            else:
                # Existing session: long-polling read (or websocket upgrade,
                # handled inside the socket object).
                if sid not in self.sockets:
                    self.logger.warning('Invalid session %s', sid)
                    r = self._bad_request()
                else:
                    socket = self._get_socket(sid)
                    try:
                        packets = await socket.handle_get_request(environ)
                        if isinstance(packets, list):
                            r = self._ok(packets, b64=b64,
                                         jsonp_index=jsonp_index)
                        else:
                            # the socket already produced a complete response
                            r = packets
                    except exceptions.EngineIOError:
                        if sid in self.sockets:  # pragma: no cover
                            await self.disconnect(sid)
                        r = self._bad_request()
                    if sid in self.sockets and self.sockets[sid].closed:
                        del self.sockets[sid]
        elif method == 'POST':
            # POST carries client-to-server packets for the polling transport.
            if sid is None or sid not in self.sockets:
                self.logger.warning('Invalid session %s', sid)
                r = self._bad_request()
            else:
                socket = self._get_socket(sid)
                try:
                    await socket.handle_post_request(environ)
                    r = self._ok(jsonp_index=jsonp_index)
                except exceptions.EngineIOError:
                    if sid in self.sockets:  # pragma: no cover
                        await self.disconnect(sid)
                    r = self._bad_request()
                except:  # pragma: no cover
                    # for any other unexpected errors, we log the error
                    # and keep going
                    self.logger.exception('post request handler error')
                    r = self._ok(jsonp_index=jsonp_index)
        elif method == 'OPTIONS':
            # CORS preflight; headers are appended below.
            r = self._ok()
        else:
            self.logger.warning('Method %s not supported', method)
            r = self._method_not_found()
        if not isinstance(r, dict):
            # a non-dict response is already fully formed (e.g. a websocket
            # handshake result); pass it through unchanged
            return r
        if self.http_compression and \
                len(r['response']) >= self.compression_threshold:
            # pick the first encoding the client accepts that we support
            encodings = [e.split(';')[0].strip() for e in
                         environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
            for encoding in encodings:
                if encoding in self.compression_methods:
                    r['response'] = \
                        getattr(self, '_' + encoding)(r['response'])
                    r['headers'] += [('Content-Encoding', encoding)]
                    break
        cors_headers = self._cors_headers(environ)
        make_response = self._async['make_response']
        if asyncio.iscoroutinefunction(make_response):
            response = await make_response(r['status'],
                                           r['headers'] + cors_headers,
                                           r['response'], environ)
        else:
            response = make_response(r['status'], r['headers'] + cors_headers,
                                     r['response'], environ)
        return response
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
|
||||
"""Start a background task using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to start a
|
||||
background task using the method that is compatible with the
|
||||
selected async mode.
|
||||
|
||||
:param target: the target function to execute.
|
||||
:param args: arguments to pass to the function.
|
||||
:param kwargs: keyword arguments to pass to the function.
|
||||
|
||||
The return value is a ``asyncio.Task`` object.
|
||||
"""
|
||||
return asyncio.ensure_future(target(*args, **kwargs))
|
||||
|
||||
async def sleep(self, seconds=0):
|
||||
"""Sleep for the requested amount of time using the appropriate async
|
||||
model.
|
||||
|
||||
This is a utility function that applications can use to put a task to
|
||||
sleep without having to worry about using the correct call for the
|
||||
selected async mode.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
return await asyncio.sleep(seconds)
|
||||
|
||||
def create_queue(self, *args, **kwargs):
|
||||
"""Create a queue object using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to create a queue
|
||||
without having to worry about using the correct call for the selected
|
||||
async mode. For asyncio based async modes, this returns an instance of
|
||||
``asyncio.Queue``.
|
||||
"""
|
||||
return asyncio.Queue(*args, **kwargs)
|
||||
|
||||
def get_queue_empty_exception(self):
|
||||
"""Return the queue empty exception for the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to work with a
|
||||
queue without having to worry about using the correct call for the
|
||||
selected async mode. For asyncio based async modes, this returns an
|
||||
instance of ``asyncio.QueueEmpty``.
|
||||
"""
|
||||
return asyncio.QueueEmpty
|
||||
|
||||
def create_event(self, *args, **kwargs):
|
||||
"""Create an event object using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to create an
|
||||
event without having to worry about using the correct call for the
|
||||
selected async mode. For asyncio based async modes, this returns
|
||||
an instance of ``asyncio.Event``.
|
||||
"""
|
||||
return asyncio.Event(*args, **kwargs)
|
||||
|
||||
    async def _handle_connect(self, environ, transport, b64=False,
                              jsonp_index=None):
        """Handle a client connection request.

        :param environ: the WSGI-style environ dict for the request.
        :param transport: the requested transport, ``'polling'`` or
                          ``'websocket'``.
        :param b64: whether the client requested base64 packet encoding.
        :param jsonp_index: the JSONP callback index, or ``None`` when JSONP
                            is not in use.
        """
        if self.start_service_task:
            # start the service task to monitor connected clients
            self.start_service_task = False
            self.start_background_task(self._service_task)

        # create a new socket for this session and register it
        sid = self._generate_id()
        s = asyncio_socket.AsyncSocket(self, sid)
        self.sockets[sid] = s

        # the OPEN packet advertises the session id, the available upgrades
        # and the ping settings (sent to the client in milliseconds)
        pkt = packet.Packet(
            packet.OPEN, {'sid': sid,
                          'upgrades': self._upgrades(sid, transport),
                          'pingTimeout': int(self.ping_timeout * 1000),
                          'pingInterval': int(self.ping_interval * 1000)})
        await s.send(pkt)

        # give the application a chance to reject the connection by
        # returning False from its connect handler
        ret = await self._trigger_event('connect', sid, environ,
                                        run_async=False)
        if ret is False:
            del self.sockets[sid]
            self.logger.warning('Application rejected connection')
            return self._unauthorized()

        if transport == 'websocket':
            # hand the request over to the socket's websocket handler, which
            # runs for the lifetime of the websocket connection
            ret = await s.handle_get_request(environ)
            if s.closed:
                # websocket connection ended, so we are done
                del self.sockets[sid]
            return ret
        else:
            # polling transport: return the queued packets (at least the
            # OPEN packet sent above) as the HTTP response
            s.connected = True
            headers = None
            if self.cookie:
                headers = [('Set-Cookie', self.cookie + '=' + sid)]
            try:
                return self._ok(await s.poll(), headers=headers, b64=b64,
                                jsonp_index=jsonp_index)
            except exceptions.QueueEmpty:
                return self._bad_request()
|
||||
|
||||
async def _trigger_event(self, event, *args, **kwargs):
|
||||
"""Invoke an event handler."""
|
||||
run_async = kwargs.pop('run_async', False)
|
||||
ret = None
|
||||
if event in self.handlers:
|
||||
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
|
||||
if run_async:
|
||||
return self.start_background_task(self.handlers[event],
|
||||
*args)
|
||||
else:
|
||||
try:
|
||||
ret = await self.handlers[event](*args)
|
||||
except asyncio.CancelledError: # pragma: no cover
|
||||
pass
|
||||
except:
|
||||
self.logger.exception(event + ' async handler error')
|
||||
if event == 'connect':
|
||||
# if connect handler raised error we reject the
|
||||
# connection
|
||||
return False
|
||||
else:
|
||||
if run_async:
|
||||
async def async_handler():
|
||||
return self.handlers[event](*args)
|
||||
|
||||
return self.start_background_task(async_handler)
|
||||
else:
|
||||
try:
|
||||
ret = self.handlers[event](*args)
|
||||
except:
|
||||
self.logger.exception(event + ' handler error')
|
||||
if event == 'connect':
|
||||
# if connect handler raised error we reject the
|
||||
# connection
|
||||
return False
|
||||
return ret
|
||||
|
||||
    async def _service_task(self):  # pragma: no cover
        """Monitor connected clients and clean up those that time out."""
        while True:
            if len(self.sockets) == 0:
                # nothing to do
                await self.sleep(self.ping_timeout)
                continue

            # go through the entire client list in a ping interval cycle
            sleep_interval = self.ping_timeout / len(self.sockets)

            try:
                # iterate over the current clients
                # (a copy is taken so sockets added or removed while we sleep
                # do not break the iteration)
                for socket in self.sockets.copy().values():
                    if not socket.closing and not socket.closed:
                        await socket.check_ping_timeout()
                    await self.sleep(sleep_interval)
            except (SystemExit, KeyboardInterrupt, asyncio.CancelledError):
                self.logger.info('service task canceled')
                break
            except:
                if asyncio.get_event_loop().is_closed():
                    # the loop was shut down underneath us; exit quietly
                    self.logger.info('event loop is closed, exiting service '
                                     'task')
                    break

                # an unexpected exception has occurred, log it and continue
                self.logger.exception('service task exception')
|
@ -0,0 +1,236 @@
|
||||
import asyncio
|
||||
import six
|
||||
import sys
|
||||
import time
|
||||
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import payload
|
||||
from . import socket
|
||||
|
||||
|
||||
class AsyncSocket(socket.Socket):
    """An Engine.IO socket for asyncio-based servers.

    Async counterpart of the base ``socket.Socket``; all transport
    operations are coroutines.
    """

    async def poll(self):
        """Wait for packets to send to the client.

        Blocks up to ``ping_timeout`` seconds for at least one packet,
        then drains one extra queued packet if immediately available.
        Raises :class:`exceptions.QueueEmpty` on timeout or cancellation.
        """
        try:
            packets = [await asyncio.wait_for(self.queue.get(),
                                              self.server.ping_timeout)]
            self.queue.task_done()
        except (asyncio.TimeoutError, asyncio.CancelledError):
            raise exceptions.QueueEmpty()
        if packets == [None]:
            # a None in the queue is the sentinel that unblocks waiters when
            # the connection is being torn down
            return []
        try:
            packets.append(self.queue.get_nowait())
            self.queue.task_done()
        except asyncio.QueueEmpty:
            pass
        return packets

    async def receive(self, pkt):
        """Receive packet from the client.

        Dispatches on the packet type; raises
        :class:`exceptions.UnknownPacketError` for unrecognized types.
        """
        self.server.logger.info('%s: Received packet %s data %s',
                                self.sid, packet.packet_names[pkt.packet_type],
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')
        if pkt.packet_type == packet.PING:
            # record the ping time so check_ping_timeout() can detect
            # clients that went away
            self.last_ping = time.time()
            await self.send(packet.Packet(packet.PONG, pkt.data))
        elif pkt.packet_type == packet.MESSAGE:
            await self.server._trigger_event(
                'message', self.sid, pkt.data,
                run_async=self.server.async_handlers)
        elif pkt.packet_type == packet.UPGRADE:
            await self.send(packet.Packet(packet.NOOP))
        elif pkt.packet_type == packet.CLOSE:
            await self.close(wait=False, abort=True)
        else:
            raise exceptions.UnknownPacketError()

    async def check_ping_timeout(self):
        """Make sure the client is still sending pings.

        This helps detect disconnections for long-polling clients.
        Returns ``False`` (after closing the socket) when the client has
        gone silent, ``True`` otherwise.
        """
        if self.closed:
            raise exceptions.SocketIsClosedError()
        if time.time() - self.last_ping > self.server.ping_interval + \
                self.server.ping_interval_grace_period:
            self.server.logger.info('%s: Client is gone, closing socket',
                                    self.sid)
            # Passing abort=False here will cause close() to write a
            # CLOSE packet. This has the effect of updating half-open sockets
            # to their correct state of disconnected
            await self.close(wait=False, abort=False)
            return False
        return True

    async def send(self, pkt):
        """Send a packet to the client.

        Packets are queued for the polling/websocket writer; during a
        transport upgrade they are held in a backlog instead.
        """
        if not await self.check_ping_timeout():
            return
        if self.upgrading:
            self.packet_backlog.append(pkt)
        else:
            await self.queue.put(pkt)
        self.server.logger.info('%s: Sending packet %s data %s',
                                self.sid, packet.packet_names[pkt.packet_type],
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')

    async def handle_get_request(self, environ):
        """Handle a long-polling GET request from the client.

        If the request carries upgrade headers, the matching transport
        upgrade handler is invoked instead of polling.
        """
        connections = [
            s.strip()
            for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
        transport = environ.get('HTTP_UPGRADE', '').lower()
        if 'upgrade' in connections and transport in self.upgrade_protocols:
            self.server.logger.info('%s: Received request to upgrade to %s',
                                    self.sid, transport)
            return await getattr(self, '_upgrade_' + transport)(environ)
        try:
            packets = await self.poll()
        except exceptions.QueueEmpty:
            # close the half-dead socket, then re-raise the original error
            exc = sys.exc_info()
            await self.close(wait=False)
            six.reraise(*exc)
        return packets

    async def handle_post_request(self, environ):
        """Handle a long-polling POST request from the client.

        The request body is decoded as an Engine.IO payload and each packet
        in it is dispatched through :meth:`receive`.
        """
        length = int(environ.get('CONTENT_LENGTH', '0'))
        if length > self.server.max_http_buffer_size:
            raise exceptions.ContentTooLongError()
        else:
            body = await environ['wsgi.input'].read(length)
            p = payload.Payload(encoded_payload=body)
            for pkt in p.packets:
                await self.receive(pkt)

    async def close(self, wait=True, abort=False):
        """Close the socket connection.

        :param wait: when ``True``, wait until the outgoing queue drains.
        :param abort: when ``True``, skip sending the CLOSE packet.
        """
        if not self.closed and not self.closing:
            self.closing = True
            await self.server._trigger_event('disconnect', self.sid)
            if not abort:
                await self.send(packet.Packet(packet.CLOSE))
            self.closed = True
            if wait:
                await self.queue.join()

    async def _upgrade_websocket(self, environ):
        """Upgrade the connection from polling to websocket."""
        if self.upgraded:
            raise IOError('Socket has been upgraded already')
        if self.server._async['websocket'] is None:
            # the selected async mode does not support websocket
            return self.server._bad_request()
        ws = self.server._async['websocket'](self._websocket_handler)
        return await ws(environ)

    async def _websocket_handler(self, ws):
        """Engine.IO handler for websocket transport.

        Runs the PING/PONG "probe" handshake when upgrading from polling,
        then pumps packets in both directions until the connection ends.
        """
        if self.connected:
            # the socket was already connected, so this is an upgrade
            self.upgrading = True  # hold packet sends during the upgrade

            try:
                pkt = await ws.wait()
            except IOError:  # pragma: no cover
                return
            decoded_pkt = packet.Packet(encoded_packet=pkt)
            if decoded_pkt.packet_type != packet.PING or \
                    decoded_pkt.data != 'probe':
                self.server.logger.info(
                    '%s: Failed websocket upgrade, no PING packet', self.sid)
                return
            await ws.send(packet.Packet(
                packet.PONG,
                data=six.text_type('probe')).encode(always_bytes=False))
            await self.queue.put(packet.Packet(packet.NOOP))  # end poll

            try:
                pkt = await ws.wait()
            except IOError:  # pragma: no cover
                return
            decoded_pkt = packet.Packet(encoded_packet=pkt)
            if decoded_pkt.packet_type != packet.UPGRADE:
                self.upgraded = False
                self.server.logger.info(
                    ('%s: Failed websocket upgrade, expected UPGRADE packet, '
                     'received %s instead.'),
                    self.sid, pkt)
                return
            self.upgraded = True

            # flush any packets that were sent during the upgrade
            for pkt in self.packet_backlog:
                await self.queue.put(pkt)
            self.packet_backlog = []
            self.upgrading = False
        else:
            # direct websocket connection, no polling phase to upgrade from
            self.connected = True
            self.upgraded = True

        # start separate writer thread
        async def writer():
            while True:
                packets = None
                try:
                    packets = await self.poll()
                except exceptions.QueueEmpty:
                    break
                if not packets:
                    # empty packet list returned -> connection closed
                    break
                try:
                    for pkt in packets:
                        await ws.send(pkt.encode(always_bytes=False))
                except:
                    break
        writer_task = asyncio.ensure_future(writer())

        self.server.logger.info(
            '%s: Upgrade to websocket successful', self.sid)

        # reader loop: wait for incoming packets until the connection ends
        while True:
            p = None
            wait_task = asyncio.ensure_future(ws.wait())
            try:
                p = await asyncio.wait_for(wait_task, self.server.ping_timeout)
            except asyncio.CancelledError:  # pragma: no cover
                # there is a bug (https://bugs.python.org/issue30508) in
                # asyncio that causes a "Task exception never retrieved" error
                # to appear when wait_task raises an exception before it gets
                # cancelled. Calling wait_task.exception() prevents the error
                # from being issued in Python 3.6, but causes other errors in
                # other versions, so we run it with all errors suppressed and
                # hope for the best.
                try:
                    wait_task.exception()
                except:
                    pass
                break
            except:
                break
            if p is None:
                # connection closed by client
                break
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            try:
                await self.receive(pkt)
            except exceptions.UnknownPacketError:  # pragma: no cover
                pass
            except exceptions.SocketIsClosedError:  # pragma: no cover
                self.server.logger.info('Receive error -- socket is closed')
                break
            except:  # pragma: no cover
                # if we get an unexpected exception we log the error and exit
                # the connection properly
                self.server.logger.exception('Unknown receive error')

        await self.queue.put(None)  # unlock the writer task so it can exit
        await asyncio.wait_for(writer_task, timeout=None)
        await self.close(wait=False, abort=True)
|
@ -0,0 +1,680 @@
|
||||
import logging
|
||||
try:
|
||||
import queue
|
||||
except ImportError: # pragma: no cover
|
||||
import Queue as queue
|
||||
import signal
|
||||
import ssl
|
||||
import threading
|
||||
import time
|
||||
|
||||
import six
|
||||
from six.moves import urllib
|
||||
try:
|
||||
import requests
|
||||
except ImportError: # pragma: no cover
|
||||
requests = None
|
||||
try:
|
||||
import websocket
|
||||
except ImportError: # pragma: no cover
|
||||
websocket = None
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import payload
|
||||
|
||||
# Module-level logger used by clients that do not supply their own.
default_logger = logging.getLogger('engineio.client')
# All currently connected Client instances; signal_handler() walks this list
# to disconnect everyone on SIGINT.
connected_clients = []

if six.PY2:  # pragma: no cover
    # Python 2 has no builtin ConnectionError; alias the closest equivalent
    # so except clauses referencing it work there too.
    ConnectionError = OSError
|
||||
|
||||
|
||||
def signal_handler(sig, frame):
    """SIGINT handler.

    Disconnect all active clients and then invoke the original signal handler.
    """
    # iterate over a snapshot, since disconnect() removes clients from the
    # connected_clients list
    for active_client in connected_clients[:]:
        if active_client.is_asyncio_based():
            active_client.start_background_task(active_client.disconnect,
                                                abort=True)
        else:
            active_client.disconnect(abort=True)
    if not callable(original_signal_handler):  # pragma: no cover
        # Handle case where no original SIGINT handler was present.
        return signal.default_int_handler(sig, frame)
    return original_signal_handler(sig, frame)
|
||||
|
||||
|
||||
# Populated on first Client instantiation with whatever SIGINT handler was
# previously installed, so signal_handler() can chain to it.
original_signal_handler = None
|
||||
|
||||
|
||||
class Client(object):
|
||||
"""An Engine.IO client.
|
||||
|
||||
This class implements a fully compliant Engine.IO web client with support
|
||||
for websocket and long-polling transports.
|
||||
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``. The default is
|
||||
``False``.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
:param request_timeout: A timeout in seconds for requests. The default is
|
||||
5 seconds.
|
||||
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
|
||||
skip SSL certificate verification, allowing
|
||||
connections to servers with self signed certificates.
|
||||
The default is ``True``.
|
||||
"""
|
||||
event_names = ['connect', 'disconnect', 'message']
|
||||
|
||||
    def __init__(self,
                 logger=False,
                 json=None,
                 request_timeout=5,
                 ssl_verify=True):
        """Initialize the client state; see the class docstring for the
        meaning of each argument."""
        # install the SIGINT chain handler once, for the first client created
        global original_signal_handler
        if original_signal_handler is None:
            original_signal_handler = signal.signal(signal.SIGINT,
                                                    signal_handler)
        self.handlers = {}                # event name -> handler callable
        self.base_url = None              # resolved Engine.IO endpoint URL
        self.transports = None            # allowed transports for connect()
        self.current_transport = None     # 'polling' or 'websocket'
        self.sid = None                   # session id assigned by the server
        self.upgrades = None              # upgrades offered by the server
        self.ping_interval = None         # seconds, from the OPEN packet
        self.ping_timeout = None          # seconds, from the OPEN packet
        self.pong_received = True
        self.http = None                  # requests session (polling)
        self.ws = None                    # websocket connection
        self.read_loop_task = None
        self.write_loop_task = None
        self.ping_loop_task = None
        self.ping_loop_event = None
        self.queue = None                 # outgoing packet queue
        self.state = 'disconnected'
        self.ssl_verify = ssl_verify

        if json is not None:
            # note: this swaps the json module globally for all packets
            packet.Packet.json = json
        if not isinstance(logger, bool):
            # a logger object was passed in; use it as is
            self.logger = logger
        else:
            self.logger = default_logger
            if not logging.root.handlers and \
                    self.logger.level == logging.NOTSET:
                # only configure the default logger if the application has
                # not set up logging itself
                if logger:
                    self.logger.setLevel(logging.INFO)
                else:
                    self.logger.setLevel(logging.ERROR)
                self.logger.addHandler(logging.StreamHandler())

        self.request_timeout = request_timeout
|
||||
|
||||
def is_asyncio_based(self):
|
||||
return False
|
||||
|
||||
def on(self, event, handler=None):
|
||||
"""Register an event handler.
|
||||
|
||||
:param event: The event name. Can be ``'connect'``, ``'message'`` or
|
||||
``'disconnect'``.
|
||||
:param handler: The function that should be invoked to handle the
|
||||
event. When this parameter is not given, the method
|
||||
acts as a decorator for the handler function.
|
||||
|
||||
Example usage::
|
||||
|
||||
# as a decorator:
|
||||
@eio.on('connect')
|
||||
def connect_handler():
|
||||
print('Connection request')
|
||||
|
||||
# as a method:
|
||||
def message_handler(msg):
|
||||
print('Received message: ', msg)
|
||||
eio.send('response')
|
||||
eio.on('message', message_handler)
|
||||
"""
|
||||
if event not in self.event_names:
|
||||
raise ValueError('Invalid event')
|
||||
|
||||
def set_handler(handler):
|
||||
self.handlers[event] = handler
|
||||
return handler
|
||||
|
||||
if handler is None:
|
||||
return set_handler
|
||||
set_handler(handler)
|
||||
|
||||
def connect(self, url, headers={}, transports=None,
|
||||
engineio_path='engine.io'):
|
||||
"""Connect to an Engine.IO server.
|
||||
|
||||
:param url: The URL of the Engine.IO server. It can include custom
|
||||
query string parameters if required by the server.
|
||||
:param headers: A dictionary with custom headers to send with the
|
||||
connection request.
|
||||
:param transports: The list of allowed transports. Valid transports
|
||||
are ``'polling'`` and ``'websocket'``. If not
|
||||
given, the polling transport is connected first,
|
||||
then an upgrade to websocket is attempted.
|
||||
:param engineio_path: The endpoint where the Engine.IO server is
|
||||
installed. The default value is appropriate for
|
||||
most cases.
|
||||
|
||||
Example usage::
|
||||
|
||||
eio = engineio.Client()
|
||||
eio.connect('http://localhost:5000')
|
||||
"""
|
||||
if self.state != 'disconnected':
|
||||
raise ValueError('Client is not in a disconnected state')
|
||||
valid_transports = ['polling', 'websocket']
|
||||
if transports is not None:
|
||||
if isinstance(transports, six.string_types):
|
||||
transports = [transports]
|
||||
transports = [transport for transport in transports
|
||||
if transport in valid_transports]
|
||||
if not transports:
|
||||
raise ValueError('No valid transports provided')
|
||||
self.transports = transports or valid_transports
|
||||
self.queue = self.create_queue()
|
||||
return getattr(self, '_connect_' + self.transports[0])(
|
||||
url, headers, engineio_path)
|
||||
|
||||
def wait(self):
|
||||
"""Wait until the connection with the server ends.
|
||||
|
||||
Client applications can use this function to block the main thread
|
||||
during the life of the connection.
|
||||
"""
|
||||
if self.read_loop_task:
|
||||
self.read_loop_task.join()
|
||||
|
||||
def send(self, data, binary=None):
|
||||
"""Send a message to a client.
|
||||
|
||||
:param data: The data to send to the client. Data can be of type
|
||||
``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
|
||||
or ``dict``, the data will be serialized as JSON.
|
||||
:param binary: ``True`` to send packet as binary, ``False`` to send
|
||||
as text. If not given, unicode (Python 2) and str
|
||||
(Python 3) are sent as text, and str (Python 2) and
|
||||
bytes (Python 3) are sent as binary.
|
||||
"""
|
||||
self._send_packet(packet.Packet(packet.MESSAGE, data=data,
|
||||
binary=binary))
|
||||
|
||||
    def disconnect(self, abort=False):
        """Disconnect from the server.

        :param abort: If set to ``True``, do not wait for background tasks
                      associated with the connection to end.
        """
        if self.state == 'connected':
            self._send_packet(packet.Packet(packet.CLOSE))
            # a None in the queue unblocks the write loop so it can exit
            self.queue.put(None)
            self.state = 'disconnecting'
            self._trigger_event('disconnect', run_async=False)
            if self.current_transport == 'websocket':
                self.ws.close()
            if not abort:
                self.read_loop_task.join()
            self.state = 'disconnected'
            try:
                connected_clients.remove(self)
            except ValueError:  # pragma: no cover
                pass
        self._reset()
|
||||
|
||||
def transport(self):
|
||||
"""Return the name of the transport currently in use.
|
||||
|
||||
The possible values returned by this function are ``'polling'`` and
|
||||
``'websocket'``.
|
||||
"""
|
||||
return self.current_transport
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
|
||||
"""Start a background task.
|
||||
|
||||
This is a utility function that applications can use to start a
|
||||
background task.
|
||||
|
||||
:param target: the target function to execute.
|
||||
:param args: arguments to pass to the function.
|
||||
:param kwargs: keyword arguments to pass to the function.
|
||||
|
||||
This function returns an object compatible with the `Thread` class in
|
||||
the Python standard library. The `start()` method on this object is
|
||||
already called by this function.
|
||||
"""
|
||||
th = threading.Thread(target=target, args=args, kwargs=kwargs)
|
||||
th.start()
|
||||
return th
|
||||
|
||||
def sleep(self, seconds=0):
|
||||
"""Sleep for the requested amount of time."""
|
||||
return time.sleep(seconds)
|
||||
|
||||
def create_queue(self, *args, **kwargs):
|
||||
"""Create a queue object."""
|
||||
q = queue.Queue(*args, **kwargs)
|
||||
q.Empty = queue.Empty
|
||||
return q
|
||||
|
||||
def create_event(self, *args, **kwargs):
|
||||
"""Create an event object."""
|
||||
return threading.Event(*args, **kwargs)
|
||||
|
||||
def _reset(self):
|
||||
self.state = 'disconnected'
|
||||
self.sid = None
|
||||
|
||||
    def _connect_polling(self, url, headers, engineio_path):
        """Establish a long-polling connection to the Engine.IO server.

        :param url: the server URL as given by the application.
        :param headers: custom headers to send with the request.
        :param engineio_path: the server's Engine.IO endpoint path.
        """
        if requests is None:  # pragma: no cover
            # not installed
            self.logger.error('requests package is not installed -- cannot '
                              'send HTTP requests!')
            return
        self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
        self.logger.info('Attempting polling connection to ' + self.base_url)
        r = self._send_request(
            'GET', self.base_url + self._get_url_timestamp(), headers=headers,
            timeout=self.request_timeout)
        if r is None:
            self._reset()
            raise exceptions.ConnectionError(
                'Connection refused by the server')
        if r.status_code < 200 or r.status_code >= 300:
            raise exceptions.ConnectionError(
                'Unexpected status code {} in server response'.format(
                    r.status_code))
        try:
            p = payload.Payload(encoded_payload=r.content)
        except ValueError:
            six.raise_from(exceptions.ConnectionError(
                'Unexpected response from server'), None)
        # the first packet of the handshake response must be OPEN, carrying
        # the session parameters
        open_packet = p.packets[0]
        if open_packet.packet_type != packet.OPEN:
            raise exceptions.ConnectionError(
                'OPEN packet not returned by server')
        self.logger.info(
            'Polling connection accepted with ' + str(open_packet.data))
        self.sid = open_packet.data['sid']
        self.upgrades = open_packet.data['upgrades']
        # the server reports ping settings in milliseconds
        self.ping_interval = open_packet.data['pingInterval'] / 1000.0
        self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
        self.current_transport = 'polling'
        self.base_url += '&sid=' + self.sid

        self.state = 'connected'
        connected_clients.append(self)
        self._trigger_event('connect', run_async=False)

        # any packets the server piggy-backed on the handshake response
        for pkt in p.packets[1:]:
            self._receive_packet(pkt)

        if 'websocket' in self.upgrades and 'websocket' in self.transports:
            # attempt to upgrade to websocket
            if self._connect_websocket(url, headers, engineio_path):
                # upgrade to websocket succeeded, we're done here
                return

        # start background tasks associated with this client
        self.ping_loop_task = self.start_background_task(self._ping_loop)
        self.write_loop_task = self.start_background_task(self._write_loop)
        self.read_loop_task = self.start_background_task(
            self._read_loop_polling)
|
||||
|
||||
def _connect_websocket(self, url, headers, engineio_path):
    """Establish or upgrade to a WebSocket connection with the server.

    :param url: the application URL given by the caller.
    :param headers: extra HTTP headers to send with the handshake.
    :param engineio_path: the server endpoint path.

    Returns True when the WebSocket connection (or upgrade) succeeded,
    False when it could not be established during an upgrade attempt.
    Raises ``exceptions.ConnectionError`` when a fresh (non-upgrade)
    connection fails.
    """
    # ``websocket`` is the optional websocket-client package, imported
    # at module level; None means it is not installed.
    if websocket is None:  # pragma: no cover
        # not installed
        self.logger.warning('websocket-client package not installed, only '
                            'polling transport is available')
        return False
    websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
    if self.sid:
        # a sid already exists: this is an upgrade from polling
        self.logger.info(
            'Attempting WebSocket upgrade to ' + websocket_url)
        upgrade = True
        websocket_url += '&sid=' + self.sid
    else:
        # no sid yet: this is a direct WebSocket connection
        upgrade = False
        self.base_url = websocket_url
        self.logger.info(
            'Attempting WebSocket connection to ' + websocket_url)

    # get the cookies from the long-polling connection so that they can
    # also be sent to the WebSocket route
    cookies = None
    if self.http:
        cookies = '; '.join(["{}={}".format(cookie.name, cookie.value)
                             for cookie in self.http.cookies])

    try:
        if not self.ssl_verify:
            # certificate verification explicitly disabled by the caller
            ws = websocket.create_connection(
                websocket_url + self._get_url_timestamp(), header=headers,
                cookie=cookies, sslopt={"cert_reqs": ssl.CERT_NONE})
        else:
            ws = websocket.create_connection(
                websocket_url + self._get_url_timestamp(), header=headers,
                cookie=cookies)
    except (ConnectionError, IOError, websocket.WebSocketException):
        if upgrade:
            # upgrade failure is not fatal: keep using polling
            self.logger.warning(
                'WebSocket upgrade failed: connection error')
            return False
        else:
            raise exceptions.ConnectionError('Connection error')
    if upgrade:
        # Engine.IO upgrade handshake: send PING "probe", expect a
        # matching PONG, then confirm with an UPGRADE packet.
        p = packet.Packet(packet.PING,
                          data=six.text_type('probe')).encode()
        try:
            ws.send(p)
        except Exception as e:  # pragma: no cover
            self.logger.warning(
                'WebSocket upgrade failed: unexpected send exception: %s',
                str(e))
            return False
        try:
            p = ws.recv()
        except Exception as e:  # pragma: no cover
            self.logger.warning(
                'WebSocket upgrade failed: unexpected recv exception: %s',
                str(e))
            return False
        pkt = packet.Packet(encoded_packet=p)
        if pkt.packet_type != packet.PONG or pkt.data != 'probe':
            self.logger.warning(
                'WebSocket upgrade failed: no PONG packet')
            return False
        p = packet.Packet(packet.UPGRADE).encode()
        try:
            ws.send(p)
        except Exception as e:  # pragma: no cover
            self.logger.warning(
                'WebSocket upgrade failed: unexpected send exception: %s',
                str(e))
            return False
        self.current_transport = 'websocket'
        self.logger.info('WebSocket upgrade was successful')
    else:
        # fresh connection: the first frame must be the OPEN packet
        # carrying the session parameters
        try:
            p = ws.recv()
        except Exception as e:  # pragma: no cover
            raise exceptions.ConnectionError(
                'Unexpected recv exception: ' + str(e))
        open_packet = packet.Packet(encoded_packet=p)
        if open_packet.packet_type != packet.OPEN:
            raise exceptions.ConnectionError('no OPEN packet')
        self.logger.info(
            'WebSocket connection accepted with ' + str(open_packet.data))
        self.sid = open_packet.data['sid']
        self.upgrades = open_packet.data['upgrades']
        # server sends intervals in milliseconds; store seconds
        self.ping_interval = open_packet.data['pingInterval'] / 1000.0
        self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
        self.current_transport = 'websocket'

        self.state = 'connected'
        connected_clients.append(self)
        self._trigger_event('connect', run_async=False)
    self.ws = ws

    # start background tasks associated with this client
    self.ping_loop_task = self.start_background_task(self._ping_loop)
    self.write_loop_task = self.start_background_task(self._write_loop)
    self.read_loop_task = self.start_background_task(
        self._read_loop_websocket)
    return True
|
||||
|
||||
def _receive_packet(self, pkt):
    """Dispatch a single packet received from the server."""
    ptype = pkt.packet_type
    # resolve a printable name for the log; out-of-range type codes get
    # a placeholder instead of raising IndexError
    if ptype < len(packet.packet_names):
        packet_name = packet.packet_names[ptype]
    else:
        packet_name = 'UNKNOWN'
    data_repr = '<binary>' if isinstance(pkt.data, bytes) else pkt.data
    self.logger.info('Received packet %s data %s', packet_name, data_repr)
    if ptype == packet.MESSAGE:
        # user messages run in a background task so slow handlers do not
        # block the read loop
        self._trigger_event('message', pkt.data, run_async=True)
    elif ptype == packet.PONG:
        self.pong_received = True
    elif ptype == packet.CLOSE:
        self.disconnect(abort=True)
    elif ptype == packet.NOOP:
        pass
    else:
        self.logger.error('Received unexpected packet of type %s', ptype)
|
||||
|
||||
def _send_packet(self, pkt):
    """Append a packet to the outgoing queue, if still connected."""
    if self.state != 'connected':
        # silently drop packets once the connection is gone
        return
    self.queue.put(pkt)
    data_repr = '<binary>' if isinstance(pkt.data, bytes) else pkt.data
    self.logger.info('Sending packet %s data %s',
                     packet.packet_names[pkt.packet_type], data_repr)
|
||||
|
||||
def _send_request(
        self, method, url, headers=None, body=None,
        timeout=None):  # pragma: no cover
    """Issue one HTTP request; return the response, or None on failure."""
    # create the session lazily so clients that never poll do not pay
    # for it
    if self.http is None:
        self.http = requests.Session()
    try:
        response = self.http.request(
            method, url, headers=headers, data=body, timeout=timeout,
            verify=self.ssl_verify)
    except requests.exceptions.RequestException as exc:
        # best-effort transport: log and let the caller see None
        self.logger.info('HTTP %s request to %s failed with error %s.',
                         method, url, exc)
        return None
    return response
|
||||
|
||||
def _trigger_event(self, event, *args, **kwargs):
|
||||
"""Invoke an event handler."""
|
||||
run_async = kwargs.pop('run_async', False)
|
||||
if event in self.handlers:
|
||||
if run_async:
|
||||
return self.start_background_task(self.handlers[event], *args)
|
||||
else:
|
||||
try:
|
||||
return self.handlers[event](*args)
|
||||
except:
|
||||
self.logger.exception(event + ' handler error')
|
||||
|
||||
def _get_engineio_url(self, url, engineio_path, transport):
|
||||
"""Generate the Engine.IO connection URL."""
|
||||
engineio_path = engineio_path.strip('/')
|
||||
parsed_url = urllib.parse.urlparse(url)
|
||||
|
||||
if transport == 'polling':
|
||||
scheme = 'http'
|
||||
elif transport == 'websocket':
|
||||
scheme = 'ws'
|
||||
else: # pragma: no cover
|
||||
raise ValueError('invalid transport')
|
||||
if parsed_url.scheme in ['https', 'wss']:
|
||||
scheme += 's'
|
||||
|
||||
return ('{scheme}://{netloc}/{path}/?{query}'
|
||||
'{sep}transport={transport}&EIO=3').format(
|
||||
scheme=scheme, netloc=parsed_url.netloc,
|
||||
path=engineio_path, query=parsed_url.query,
|
||||
sep='&' if parsed_url.query else '',
|
||||
transport=transport)
|
||||
|
||||
def _get_url_timestamp(self):
|
||||
"""Generate the Engine.IO query string timestamp."""
|
||||
return '&t=' + str(time.time())
|
||||
|
||||
def _ping_loop(self):
    """This background task sends a PING to the server at the requested
    interval.

    The loop runs while the client is in the 'connected' state; if a
    PING goes unanswered the connection is aborted and the companion
    write loop is woken up so it can exit.
    """
    # start optimistic: pretend the last (non-existent) PING was answered
    self.pong_received = True
    if self.ping_loop_event is None:
        self.ping_loop_event = self.create_event()
    else:
        # reuse the event object across reconnections
        self.ping_loop_event.clear()
    while self.state == 'connected':
        if not self.pong_received:
            # the previous PING was never answered: tear the connection down
            self.logger.info(
                'PONG response has not been received, aborting')
            if self.ws:
                self.ws.close(timeout=0)
            # a None in the queue tells the write loop to exit
            self.queue.put(None)
            break
        self.pong_received = False
        self._send_packet(packet.Packet(packet.PING))
        # sleep until the next ping is due, or until the event is set by
        # a disconnect to wake this task up early
        self.ping_loop_event.wait(timeout=self.ping_interval)
    self.logger.info('Exiting ping task')
|
||||
|
||||
def _read_loop_polling(self):
    """Read packets by polling the Engine.IO server.

    Issues back-to-back long-polling GET requests while connected; any
    transport error aborts the connection and wakes the write loop.
    When the loop ends, the write and ping tasks are joined and, if the
    server closed on us, the disconnect event is delivered and the
    client is reset.
    """
    while self.state == 'connected':
        self.logger.info(
            'Sending polling GET request to ' + self.base_url)
        # the request timeout must outlast a full ping cycle plus a
        # grace period, so a healthy but idle connection is not dropped
        r = self._send_request(
            'GET', self.base_url + self._get_url_timestamp(),
            timeout=max(self.ping_interval, self.ping_timeout) + 5)
        if r is None:
            self.logger.warning(
                'Connection refused by the server, aborting')
            self.queue.put(None)  # unblock the write loop so it can exit
            break
        if r.status_code < 200 or r.status_code >= 300:
            self.logger.warning('Unexpected status code %s in server '
                                'response, aborting', r.status_code)
            self.queue.put(None)
            break
        try:
            p = payload.Payload(encoded_payload=r.content)
        except ValueError:
            self.logger.warning(
                'Unexpected packet from server, aborting')
            self.queue.put(None)
            break
        # one poll response may carry several packets
        for pkt in p.packets:
            self._receive_packet(pkt)

    self.logger.info('Waiting for write loop task to end')
    self.write_loop_task.join()
    self.logger.info('Waiting for ping loop task to end')
    if self.ping_loop_event:  # pragma: no cover
        self.ping_loop_event.set()  # wake the ping loop so it can exit
    self.ping_loop_task.join()
    if self.state == 'connected':
        # still "connected" here means the server ended the session:
        # notify the application and clean up
        self._trigger_event('disconnect', run_async=False)
        try:
            connected_clients.remove(self)
        except ValueError:  # pragma: no cover
            pass
        self._reset()
    self.logger.info('Exiting read loop task')
|
||||
|
||||
def _read_loop_websocket(self):
    """Read packets from the Engine.IO WebSocket connection.

    Blocks on ``ws.recv()`` while connected; any receive error aborts
    the connection and wakes the write loop.  On exit the write and
    ping tasks are joined and, if the server closed on us, the
    disconnect event is delivered and the client is reset.
    """
    while self.state == 'connected':
        p = None
        try:
            p = self.ws.recv()
        except websocket.WebSocketConnectionClosedException:
            self.logger.warning(
                'WebSocket connection was closed, aborting')
            self.queue.put(None)  # unblock the write loop so it can exit
            break
        except Exception as e:
            self.logger.info(
                'Unexpected error "%s", aborting', str(e))
            self.queue.put(None)
            break
        if isinstance(p, six.text_type):  # pragma: no cover
            # normalize text frames to bytes before decoding
            p = p.encode('utf-8')
        pkt = packet.Packet(encoded_packet=p)
        self._receive_packet(pkt)

    self.logger.info('Waiting for write loop task to end')
    self.write_loop_task.join()
    self.logger.info('Waiting for ping loop task to end')
    if self.ping_loop_event:  # pragma: no cover
        self.ping_loop_event.set()  # wake the ping loop so it can exit
    self.ping_loop_task.join()
    if self.state == 'connected':
        # still "connected" here means the server ended the session:
        # notify the application and clean up
        self._trigger_event('disconnect', run_async=False)
        try:
            connected_clients.remove(self)
        except ValueError:  # pragma: no cover
            pass
        self._reset()
    self.logger.info('Exiting read loop task')
|
||||
|
||||
def _write_loop(self):
    """This background task sends packages to the server as they are
    pushed to the send queue.

    A ``None`` pushed to the queue is the sentinel that tells this task
    to exit.  Packets queued while one is being waited on are batched
    into a single payload when using the polling transport.
    """
    while self.state == 'connected':
        # to simplify the timeout handling, use the maximum of the
        # ping interval and ping timeout as timeout, with an extra 5
        # seconds grace period
        timeout = max(self.ping_interval, self.ping_timeout) + 5
        packets = None
        try:
            packets = [self.queue.get(timeout=timeout)]
        except self.queue.Empty:
            # nothing queued for a whole ping cycle: something is wrong
            self.logger.error('packet queue is empty, aborting')
            break
        if packets == [None]:
            # exit sentinel received as the first item
            self.queue.task_done()
            packets = []
        else:
            # drain whatever else is queued without blocking, so the
            # batch goes out in one payload
            while True:
                try:
                    packets.append(self.queue.get(block=False))
                except self.queue.Empty:
                    break
                if packets[-1] is None:
                    # sentinel found mid-batch: send what we have, then exit
                    packets = packets[:-1]
                    self.queue.task_done()
                    break
        if not packets:
            # empty packet list returned -> connection closed
            break
        if self.current_transport == 'polling':
            p = payload.Payload(packets=packets)
            r = self._send_request(
                'POST', self.base_url, body=p.encode(),
                headers={'Content-Type': 'application/octet-stream'},
                timeout=self.request_timeout)
            for pkt in packets:
                self.queue.task_done()
            if r is None:
                self.logger.warning(
                    'Connection refused by the server, aborting')
                break
            if r.status_code < 200 or r.status_code >= 300:
                self.logger.warning('Unexpected status code %s in server '
                                    'response, aborting', r.status_code)
                self._reset()
                break
        else:
            # websocket
            try:
                for pkt in packets:
                    encoded_packet = pkt.encode(always_bytes=False)
                    if pkt.binary:
                        self.ws.send_binary(encoded_packet)
                    else:
                        self.ws.send(encoded_packet)
                    self.queue.task_done()
            except websocket.WebSocketConnectionClosedException:
                self.logger.warning(
                    'WebSocket connection was closed, aborting')
                break
    self.logger.info('Exiting write loop task')
|
@ -0,0 +1,22 @@
|
||||
class EngineIOError(Exception):
    """Base class for all Engine.IO exceptions."""
    pass
|
||||
|
||||
|
||||
class ContentTooLongError(EngineIOError):
    """Raised when a payload exceeds the configured size limit."""
    pass
|
||||
|
||||
|
||||
class UnknownPacketError(EngineIOError):
    """Raised when a packet of an unrecognized type is received."""
    pass
|
||||
|
||||
|
||||
class QueueEmpty(EngineIOError):
    """Raised when a read from an empty packet queue times out."""
    pass
|
||||
|
||||
|
||||
class SocketIsClosedError(EngineIOError):
    """Raised when an operation is attempted on a closed socket."""
    pass
|
||||
|
||||
|
||||
class ConnectionError(EngineIOError):
    """Raised when a connection to the server cannot be established."""
    pass
|
@ -0,0 +1,87 @@
|
||||
import os
|
||||
from engineio.static_files import get_static_file
|
||||
|
||||
|
||||
class WSGIApp(object):
    """WSGI middleware that routes traffic to an Engine.IO server.

    Requests under the configured Engine.IO endpoint are handed to the
    Engine.IO application; any other request is served from the optional
    static file map, or forwarded to a secondary WSGI application.

    :param engineio_app: The Engine.IO server. Must be an instance of the
                         ``engineio.Server`` class.
    :param wsgi_app: The WSGI app that receives all other traffic.
    :param static_files: A dictionary with static file mapping rules. See the
                         documentation for details on this argument.
    :param engineio_path: The endpoint where the Engine.IO application should
                          be installed. The default value is appropriate for
                          most cases.

    Example usage::

        import engineio
        import eventlet

        eio = engineio.Server()
        app = engineio.WSGIApp(eio, static_files={
            '/': {'content_type': 'text/html', 'filename': 'index.html'},
            '/index.html': {'content_type': 'text/html',
                            'filename': 'index.html'},
        })
        eventlet.wsgi.server(eventlet.listen(('', 8000)), app)
    """
    def __init__(self, engineio_app, wsgi_app=None, static_files=None,
                 engineio_path='engine.io'):
        self.engineio_app = engineio_app
        self.wsgi_app = wsgi_app
        self.engineio_path = engineio_path.strip('/')
        self.static_files = static_files or {}

    def __call__(self, environ, start_response):
        if 'gunicorn.socket' in environ:
            # gunicorn exposes the raw socket as environ['gunicorn.socket'],
            # while eventlet expects it wrapped in an object stored under
            # environ['eventlet.input'].  Re-publish the socket in eventlet's
            # format so eventlet's WebSocket module also works under gunicorn.
            class Input(object):
                def __init__(self, socket):
                    self.socket = socket

                def get_socket(self):
                    return self.socket

            environ['eventlet.input'] = Input(environ['gunicorn.socket'])
        path = environ['PATH_INFO']
        if path is not None and \
                path.startswith('/{0}/'.format(self.engineio_path)):
            # Engine.IO traffic goes straight to the Engine.IO server
            return self.engineio_app.handle_request(environ, start_response)
        static_file = None
        if self.static_files:
            static_file = get_static_file(path, self.static_files)
        if static_file:
            if not os.path.exists(static_file['filename']):
                return self.not_found(start_response)
            start_response(
                '200 OK', [('Content-Type', static_file['content_type'])])
            with open(static_file['filename'], 'rb') as f:
                return [f.read()]
        if self.wsgi_app is not None:
            return self.wsgi_app(environ, start_response)
        return self.not_found(start_response)

    def not_found(self, start_response):
        """Send a plain-text 404 response."""
        start_response("404 Not Found", [('Content-Type', 'text/plain')])
        return [b'Not Found']
|
||||
|
||||
|
||||
class Middleware(WSGIApp):
    """Deprecated alias of :class:`WSGIApp`, kept for backward
    compatibility."""
    def __init__(self, engineio_app, wsgi_app=None,
                 engineio_path='engine.io'):
        super(Middleware, self).__init__(
            engineio_app, wsgi_app, engineio_path=engineio_path)
|
@ -0,0 +1,92 @@
|
||||
import base64
|
||||
import json as _json
|
||||
|
||||
import six
|
||||
|
||||
# Engine.IO packet type codes, as defined by the protocol.
(OPEN, CLOSE, PING, PONG, MESSAGE, UPGRADE, NOOP) = (0, 1, 2, 3, 4, 5, 6)
# Human-readable names, indexable by the type codes above.
packet_names = ['OPEN', 'CLOSE', 'PING', 'PONG', 'MESSAGE', 'UPGRADE', 'NOOP']

# Types accepted as binary packet payloads.
binary_types = (six.binary_type, bytearray)
|
||||
|
||||
|
||||
class Packet(object):
    """Engine.IO packet.

    A packet has a type (one of the module-level constants OPEN, CLOSE,
    PING, PONG, MESSAGE, UPGRADE, NOOP), an optional payload in ``data``,
    and a ``binary`` flag that selects between the text and binary wire
    encodings.
    """

    # JSON codec used for dict/list payloads; applications may replace it.
    json = _json

    def __init__(self, packet_type=NOOP, data=None, binary=None,
                 encoded_packet=None):
        self.packet_type = packet_type
        self.data = data
        if binary is not None:
            # the caller decided explicitly
            self.binary = binary
        elif isinstance(data, six.text_type):
            self.binary = False
        elif isinstance(data, binary_types):
            self.binary = True
        else:
            # anything else (dict, list, numbers, None) goes out as text
            self.binary = False
        if encoded_packet:
            # decoding overwrites packet_type, data and binary
            self.decode(encoded_packet)

    def encode(self, b64=False, always_bytes=True):
        """Encode the packet for transmission.

        :param b64: encode a binary payload as base64 text (for clients
                    that cannot handle binary frames).
        :param always_bytes: return ``bytes`` even for text packets.
        """
        if self.binary and not b64:
            # binary encoding: the type code is a raw byte
            encoded_packet = six.int2byte(self.packet_type)
        else:
            # text encoding: the type code is an ASCII digit
            encoded_packet = six.text_type(self.packet_type)
        if self.binary and b64:
            # a leading 'b' marks a base64-encoded binary packet
            encoded_packet = 'b' + encoded_packet
        if self.binary:
            if b64:
                encoded_packet += base64.b64encode(self.data).decode('utf-8')
            else:
                encoded_packet += self.data
        elif isinstance(self.data, six.string_types):
            encoded_packet += self.data
        elif isinstance(self.data, dict) or isinstance(self.data, list):
            # compact JSON, no whitespace
            encoded_packet += self.json.dumps(self.data,
                                              separators=(',', ':'))
        elif self.data is not None:
            encoded_packet += str(self.data)
        if always_bytes and not isinstance(encoded_packet, binary_types):
            encoded_packet = encoded_packet.encode('utf-8')
        return encoded_packet

    def decode(self, encoded_packet):
        """Decode a transmitted package.

        Sets ``packet_type``, ``binary`` and ``data`` from the encoded
        form, accepting text, base64 and raw binary encodings.
        """
        b64 = False
        if not isinstance(encoded_packet, binary_types):
            encoded_packet = encoded_packet.encode('utf-8')
        elif not isinstance(encoded_packet, bytes):
            # e.g. a bytearray: normalize to bytes
            encoded_packet = bytes(encoded_packet)
        self.packet_type = six.byte2int(encoded_packet[0:1])
        if self.packet_type == 98:  # 'b' --> binary base64 encoded packet
            self.binary = True
            encoded_packet = encoded_packet[1:]
            self.packet_type = six.byte2int(encoded_packet[0:1])
            self.packet_type -= 48  # ASCII digit -> integer type code
            b64 = True
        elif self.packet_type >= 48:
            # first byte is an ASCII digit ('0' == 48): text encoding
            self.packet_type -= 48
            self.binary = False
        else:
            # first byte is the raw type code: binary encoding
            self.binary = True
        self.data = None
        if len(encoded_packet) > 1:
            if self.binary:
                if b64:
                    self.data = base64.b64decode(encoded_packet[1:])
                else:
                    self.data = encoded_packet[1:]
            else:
                try:
                    self.data = self.json.loads(
                        encoded_packet[1:].decode('utf-8'))
                    if isinstance(self.data, int):
                        # do not allow integer payloads, see
                        # github.com/miguelgrinberg/python-engineio/issues/75
                        # for background on this decision
                        raise ValueError
                except ValueError:
                    # not JSON: keep the payload as a plain string
                    self.data = encoded_packet[1:].decode('utf-8')
|
@ -0,0 +1,81 @@
|
||||
import six
|
||||
|
||||
from . import packet
|
||||
|
||||
from six.moves import urllib
|
||||
|
||||
|
||||
class Payload(object):
    """Engine.IO payload.

    A payload is a framed sequence of packets, used by the polling
    transport to carry several packets in one HTTP request or response.
    """
    # cap on packets accepted per payload, bounding decode work on
    # untrusted input
    max_decode_packets = 16

    def __init__(self, packets=None, encoded_payload=None):
        self.packets = packets or []
        if encoded_payload is not None:
            self.decode(encoded_payload)

    def encode(self, b64=False, jsonp_index=None):
        """Encode the payload for transmission.

        :param b64: encode binary packets as base64 text.
        :param jsonp_index: when given, wrap the result in a JSONP
                            callback with this index.
        """
        encoded_payload = b''
        for pkt in self.packets:
            encoded_packet = pkt.encode(b64=b64)
            packet_len = len(encoded_packet)
            if b64:
                # text framing: "<length>:<packet>"
                encoded_payload += str(packet_len).encode('utf-8') + b':' + \
                    encoded_packet
            else:
                # binary framing: length is sent one decimal digit per
                # byte (values 0-9), terminated by 0xff
                binary_len = b''
                while packet_len != 0:
                    binary_len = six.int2byte(packet_len % 10) + binary_len
                    packet_len = int(packet_len / 10)
                if not pkt.binary:
                    encoded_payload += b'\0'  # marker 0: text packet
                else:
                    encoded_payload += b'\1'  # marker 1: binary packet
                encoded_payload += binary_len + b'\xff' + encoded_packet
        if jsonp_index is not None:
            # wrap in the ___eio[<index>]("...") JSONP callback form
            encoded_payload = b'___eio[' + \
                str(jsonp_index).encode() + \
                b']("' + \
                encoded_payload.replace(b'"', b'\\"') + \
                b'");'
        return encoded_payload

    def decode(self, encoded_payload):
        """Decode a transmitted payload.

        Accepts the binary framing (leading marker byte 0 or 1), the
        text "<length>:<packet>" framing, and JSONP POST bodies
        (starting with ``d=``).

        :raises ValueError: when the payload holds more than
                            ``max_decode_packets`` packets.
        """
        self.packets = []

        if len(encoded_payload) == 0:
            return

        # JSONP POST payload starts with 'd='
        if encoded_payload.startswith(b'd='):
            encoded_payload = urllib.parse.parse_qs(
                encoded_payload)[b'd'][0]

        i = 0
        if six.byte2int(encoded_payload[0:1]) <= 1:
            # binary encoding
            while i < len(encoded_payload):
                if len(self.packets) >= self.max_decode_packets:
                    raise ValueError('Too many packets in payload')
                packet_len = 0
                i += 1  # skip the text/binary marker byte
                while six.byte2int(encoded_payload[i:i + 1]) != 255:
                    # accumulate the decimal length digits until 0xff
                    packet_len = packet_len * 10 + six.byte2int(
                        encoded_payload[i:i + 1])
                    i += 1
                self.packets.append(packet.Packet(
                    encoded_packet=encoded_payload[i + 1:i + 1 + packet_len]))
                i += packet_len + 1
        else:
            # assume text encoding
            encoded_payload = encoded_payload.decode('utf-8')
            while i < len(encoded_payload):
                if len(self.packets) >= self.max_decode_packets:
                    raise ValueError('Too many packets in payload')
                j = encoded_payload.find(':', i)
                packet_len = int(encoded_payload[i:j])
                pkt = encoded_payload[j + 1:j + 1 + packet_len]
                self.packets.append(packet.Packet(encoded_packet=pkt))
                i = j + 1 + packet_len
|
@ -0,0 +1,675 @@
|
||||
import gzip
|
||||
import importlib
|
||||
import logging
|
||||
import uuid
|
||||
import zlib
|
||||
|
||||
import six
|
||||
from six.moves import urllib
|
||||
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import payload
|
||||
from . import socket
|
||||
|
||||
# Module-level logger used by Server when the application does not
# supply its own logger object.
default_logger = logging.getLogger('engineio.server')
|
||||
|
||||
|
||||
class Server(object):
|
||||
"""An Engine.IO server.
|
||||
|
||||
This class implements a fully compliant Engine.IO web server with support
|
||||
for websocket and long-polling transports.
|
||||
|
||||
:param async_mode: The asynchronous model to use. See the Deployment
|
||||
section in the documentation for a description of the
|
||||
available options. Valid async modes are "threading",
|
||||
"eventlet", "gevent" and "gevent_uwsgi". If this
|
||||
argument is not given, "eventlet" is tried first, then
|
||||
"gevent_uwsgi", then "gevent", and finally "threading".
|
||||
The first async mode that has all its dependencies
|
||||
installed is the one that is chosen.
|
||||
:param ping_timeout: The time in seconds that the client waits for the
|
||||
server to respond before disconnecting. The default
|
||||
is 60 seconds.
|
||||
:param ping_interval: The interval in seconds at which the client pings
|
||||
the server. The default is 25 seconds. For advanced
|
||||
control, a two element tuple can be given, where
|
||||
the first number is the ping interval and the second
|
||||
is a grace period added by the server. The default
|
||||
grace period is 5 seconds.
|
||||
:param max_http_buffer_size: The maximum size of a message when using the
|
||||
polling transport. The default is 100,000,000
|
||||
bytes.
|
||||
:param allow_upgrades: Whether to allow transport upgrades or not. The
|
||||
default is ``True``.
|
||||
:param http_compression: Whether to compress packages when using the
|
||||
polling transport. The default is ``True``.
|
||||
:param compression_threshold: Only compress messages when their byte size
|
||||
is greater than this value. The default is
|
||||
1024 bytes.
|
||||
:param cookie: Name of the HTTP cookie that contains the client session
|
||||
id. If set to ``None``, a cookie is not sent to the client.
|
||||
The default is ``'io'``.
|
||||
:param cors_allowed_origins: Origin or list of origins that are allowed to
|
||||
connect to this server. Only the same origin
|
||||
is allowed by default. Set this argument to
|
||||
``'*'`` to allow all origins, or to ``[]`` to
|
||||
disable CORS handling.
|
||||
:param cors_credentials: Whether credentials (cookies, authentication) are
|
||||
allowed in requests to this server. The default
|
||||
is ``True``.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``. The default is
|
||||
``False``.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
:param async_handlers: If set to ``True``, run message event handlers in
|
||||
non-blocking threads. To run handlers synchronously,
|
||||
set to ``False``. The default is ``True``.
|
||||
:param monitor_clients: If set to ``True``, a background task will ensure
|
||||
inactive clients are closed. Set to ``False`` to
|
||||
disable the monitoring task (not recommended). The
|
||||
default is ``True``.
|
||||
:param kwargs: Reserved for future extensions, any additional parameters
|
||||
given as keyword arguments will be silently ignored.
|
||||
"""
|
||||
# HTTP payload compression algorithms supported for polling responses.
compression_methods = ['gzip', 'deflate']
# Event names accepted by on().
event_names = ['connect', 'disconnect', 'message']
# Default for the monitor_clients constructor argument.
_default_monitor_clients = True
|
||||
|
||||
def __init__(self, async_mode=None, ping_timeout=60, ping_interval=25,
             max_http_buffer_size=100000000, allow_upgrades=True,
             http_compression=True, compression_threshold=1024,
             cookie='io', cors_allowed_origins=None,
             cors_credentials=True, logger=False, json=None,
             async_handlers=True, monitor_clients=None, **kwargs):
    """Initialize the server; see the class docstring for the meaning
    of each argument."""
    self.ping_timeout = ping_timeout
    if isinstance(ping_interval, tuple):
        # (interval, grace period) form
        self.ping_interval = ping_interval[0]
        self.ping_interval_grace_period = ping_interval[1]
    else:
        self.ping_interval = ping_interval
        self.ping_interval_grace_period = 5
    self.max_http_buffer_size = max_http_buffer_size
    self.allow_upgrades = allow_upgrades
    self.http_compression = http_compression
    self.compression_threshold = compression_threshold
    self.cookie = cookie
    self.cors_allowed_origins = cors_allowed_origins
    self.cors_credentials = cors_credentials
    self.async_handlers = async_handlers
    self.sockets = {}
    self.handlers = {}
    self.start_service_task = monitor_clients \
        if monitor_clients is not None else self._default_monitor_clients
    if json is not None:
        # install the custom JSON codec on the Packet class (global)
        packet.Packet.json = json
    if not isinstance(logger, bool):
        # caller supplied a logger object: use it as-is
        self.logger = logger
    else:
        self.logger = default_logger
        if not logging.root.handlers and \
                self.logger.level == logging.NOTSET:
            # only configure the default logger when the application
            # has not set up logging itself
            if logger:
                self.logger.setLevel(logging.INFO)
            else:
                self.logger.setLevel(logging.ERROR)
            self.logger.addHandler(logging.StreamHandler())
    modes = self.async_modes()
    if async_mode is not None:
        # restrict the candidate list to the requested mode, if valid
        modes = [async_mode] if async_mode in modes else []
    self._async = None
    self.async_mode = None
    for mode in modes:
        # pick the first async driver whose dependencies import cleanly
        try:
            self._async = importlib.import_module(
                'engineio.async_drivers.' + mode)._async
            asyncio_based = self._async['asyncio'] \
                if 'asyncio' in self._async else False
            if asyncio_based != self.is_asyncio_based():
                continue  # pragma: no cover
            self.async_mode = mode
            break
        except ImportError:
            pass
    if self.async_mode is None:
        raise ValueError('Invalid async_mode specified')
    if self.is_asyncio_based() and \
            ('asyncio' not in self._async or not
             self._async['asyncio']):  # pragma: no cover
        raise ValueError('The selected async_mode is not asyncio '
                         'compatible')
    if not self.is_asyncio_based() and 'asyncio' in self._async and \
            self._async['asyncio']:  # pragma: no cover
        raise ValueError('The selected async_mode requires asyncio and '
                         'must use the AsyncServer class')
    self.logger.info('Server initialized for %s.', self.async_mode)
|
||||
|
||||
def is_asyncio_based(self):
    """Return True when this server class runs on an asyncio loop;
    this synchronous server always answers False."""
    return False
|
||||
|
||||
def async_modes(self):
    """Return the supported async modes, in the order they are tried
    when no explicit mode is requested."""
    return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading']
|
||||
|
||||
def on(self, event, handler=None):
    """Register an event handler.

    :param event: The event name. Can be ``'connect'``, ``'message'`` or
                  ``'disconnect'``.
    :param handler: The function that should be invoked to handle the
                    event. When this parameter is not given, the method
                    acts as a decorator for the handler function.

    Example usage::

        # as a decorator:
        @eio.on('connect')
        def connect_handler(sid, environ):
            print('Connection request')
            if environ['REMOTE_ADDR'] in blacklisted:
                return False  # reject

        # as a method:
        def message_handler(sid, msg):
            print('Received message: ', msg)
            eio.send(sid, 'response')
        eio.on('message', message_handler)

    Handlers receive the client's ``sid`` (session ID) as first
    argument. The ``'connect'`` handler also receives the WSGI
    environment and may return ``False`` to reject the connection; the
    ``'message'`` handler also receives the message payload; the
    ``'disconnect'`` handler takes no second argument.
    """
    if event not in self.event_names:
        raise ValueError('Invalid event')

    def register(fn):
        # store the handler and hand it back so decorator usage leaves
        # the decorated function unchanged
        self.handlers[event] = fn
        return fn

    if handler is None:
        # decorator form: defer registration to the returned wrapper
        return register
    register(handler)
|
||||
|
||||
def send(self, sid, data, binary=None):
    """Send a message to a client.

    :param sid: The session id of the recipient client.
    :param data: The data to send to the client. Data can be of type
                 ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
                 or ``dict``, the data will be serialized as JSON.
    :param binary: ``True`` to send packet as binary, ``False`` to send
                   as text. If not given, unicode (Python 2) and str
                   (Python 3) are sent as text, and str (Python 2) and
                   bytes (Python 3) are sent as binary.
    """
    try:
        client_socket = self._get_socket(sid)
    except KeyError:
        # the socket is gone: log and drop the message
        self.logger.warning('Cannot send to sid %s', sid)
    else:
        client_socket.send(
            packet.Packet(packet.MESSAGE, data=data, binary=binary))
|
||||
|
||||
def get_session(self, sid):
    """Return the user session for a client.

    :param sid: The session id of the client.

    The return value is a dictionary. Modifications made to this
    dictionary are not guaranteed to be preserved unless
    ``save_session()`` is called, or the ``session`` context manager
    is used.
    """
    return self._get_socket(sid).session
|
||||
|
||||
def save_session(self, sid, session):
|
||||
"""Store the user session for a client.
|
||||
|
||||
:param sid: The session id of the client.
|
||||
:param session: The session dictionary.
|
||||
"""
|
||||
socket = self._get_socket(sid)
|
||||
socket.session = session
|
||||
|
||||
def session(self, sid):
|
||||
"""Return the user session for a client with context manager syntax.
|
||||
|
||||
:param sid: The session id of the client.
|
||||
|
||||
This is a context manager that returns the user session dictionary for
|
||||
the client. Any changes that are made to this dictionary inside the
|
||||
context manager block are saved back to the session. Example usage::
|
||||
|
||||
@eio.on('connect')
|
||||
def on_connect(sid, environ):
|
||||
username = authenticate_user(environ)
|
||||
if not username:
|
||||
return False
|
||||
with eio.session(sid) as session:
|
||||
session['username'] = username
|
||||
|
||||
@eio.on('message')
|
||||
def on_message(sid, msg):
|
||||
with eio.session(sid) as session:
|
||||
print('received message from ', session['username'])
|
||||
"""
|
||||
class _session_context_manager(object):
|
||||
def __init__(self, server, sid):
|
||||
self.server = server
|
||||
self.sid = sid
|
||||
self.session = None
|
||||
|
||||
def __enter__(self):
|
||||
self.session = self.server.get_session(sid)
|
||||
return self.session
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.server.save_session(sid, self.session)
|
||||
|
||||
return _session_context_manager(self, sid)
|
||||
|
||||
def disconnect(self, sid=None):
|
||||
"""Disconnect a client.
|
||||
|
||||
:param sid: The session id of the client to close. If this parameter
|
||||
is not given, then all clients are closed.
|
||||
"""
|
||||
if sid is not None:
|
||||
try:
|
||||
socket = self._get_socket(sid)
|
||||
except KeyError: # pragma: no cover
|
||||
# the socket was already closed or gone
|
||||
pass
|
||||
else:
|
||||
socket.close()
|
||||
if sid in self.sockets: # pragma: no cover
|
||||
del self.sockets[sid]
|
||||
else:
|
||||
for client in six.itervalues(self.sockets):
|
||||
client.close()
|
||||
self.sockets = {}
|
||||
|
||||
def transport(self, sid):
|
||||
"""Return the name of the transport used by the client.
|
||||
|
||||
The two possible values returned by this function are ``'polling'``
|
||||
and ``'websocket'``.
|
||||
|
||||
:param sid: The session of the client.
|
||||
"""
|
||||
return 'websocket' if self._get_socket(sid).upgraded else 'polling'
|
||||
|
||||
    def handle_request(self, environ, start_response):
        """Handle an HTTP request from the client.

        This is the entry point of the Engine.IO application, using the same
        interface as a WSGI application. For the typical usage, this function
        is invoked by the :class:`Middleware` instance, but it can be invoked
        directly when the middleware is not used.

        :param environ: The WSGI environment.
        :param start_response: The WSGI ``start_response`` function.

        This function returns the HTTP response body to deliver to the client
        as a byte sequence.
        """
        if self.cors_allowed_origins != []:
            # Validate the origin header if present
            # This is important for WebSocket more than for HTTP, since
            # browsers only apply CORS controls to HTTP.
            origin = environ.get('HTTP_ORIGIN')
            if origin:
                allowed_origins = self._cors_allowed_origins(environ)
                if allowed_origins is not None and origin not in \
                        allowed_origins:
                    # reject the request immediately, before any dispatch
                    self.logger.info(origin + ' is not an accepted origin.')
                    r = self._bad_request()
                    start_response(r['status'], r['headers'])
                    return [r['response']]

        method = environ['REQUEST_METHOD']
        query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))

        # sid identifies an existing session; absent on initial connects
        sid = query['sid'][0] if 'sid' in query else None
        b64 = False
        jsonp = False
        jsonp_index = None

        # 'b64' requests base64-encoded (text) binary payloads
        if 'b64' in query:
            if query['b64'][0] == "1" or query['b64'][0].lower() == "true":
                b64 = True
        # 'j' requests a JSONP response wrapped at the given callback index
        if 'j' in query:
            jsonp = True
            try:
                jsonp_index = int(query['j'][0])
            except (ValueError, KeyError, IndexError):
                # Invalid JSONP index number
                pass

        if jsonp and jsonp_index is None:
            self.logger.warning('Invalid JSONP index number')
            r = self._bad_request()
        elif method == 'GET':
            if sid is None:
                # no session yet: this is a new connection request
                transport = query.get('transport', ['polling'])[0]
                if transport != 'polling' and transport != 'websocket':
                    self.logger.warning('Invalid transport %s', transport)
                    r = self._bad_request()
                else:
                    r = self._handle_connect(environ, start_response,
                                             transport, b64, jsonp_index)
            else:
                # existing session: long-polling read (or websocket upgrade)
                if sid not in self.sockets:
                    self.logger.warning('Invalid session %s', sid)
                    r = self._bad_request()
                else:
                    socket = self._get_socket(sid)
                    try:
                        packets = socket.handle_get_request(
                            environ, start_response)
                        if isinstance(packets, list):
                            r = self._ok(packets, b64=b64,
                                         jsonp_index=jsonp_index)
                        else:
                            # websocket handlers return the raw WSGI response
                            r = packets
                    except exceptions.EngineIOError:
                        if sid in self.sockets:  # pragma: no cover
                            self.disconnect(sid)
                        r = self._bad_request()
                    if sid in self.sockets and self.sockets[sid].closed:
                        # clean up sessions that closed during this request
                        del self.sockets[sid]
        elif method == 'POST':
            # client delivering packets over long-polling
            if sid is None or sid not in self.sockets:
                self.logger.warning('Invalid session %s', sid)
                r = self._bad_request()
            else:
                socket = self._get_socket(sid)
                try:
                    socket.handle_post_request(environ)
                    r = self._ok(jsonp_index=jsonp_index)
                except exceptions.EngineIOError:
                    if sid in self.sockets:  # pragma: no cover
                        self.disconnect(sid)
                    r = self._bad_request()
                except:  # pragma: no cover
                    # for any other unexpected errors, we log the error
                    # and keep going
                    self.logger.exception('post request handler error')
                    r = self._ok(jsonp_index=jsonp_index)
        elif method == 'OPTIONS':
            # CORS preflight; actual CORS headers are added below
            r = self._ok()
        else:
            self.logger.warning('Method %s not supported', method)
            r = self._method_not_found()

        if not isinstance(r, dict):
            # a websocket handler already completed the WSGI exchange
            return r or []
        if self.http_compression and \
                len(r['response']) >= self.compression_threshold:
            # compress with the first method the client accepts that we
            # support (handled by the _gzip/_deflate helper methods)
            encodings = [e.split(';')[0].strip() for e in
                         environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
            for encoding in encodings:
                if encoding in self.compression_methods:
                    r['response'] = \
                        getattr(self, '_' + encoding)(r['response'])
                    r['headers'] += [('Content-Encoding', encoding)]
                    break
        cors_headers = self._cors_headers(environ)
        start_response(r['status'], r['headers'] + cors_headers)
        return [r['response']]
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
|
||||
"""Start a background task using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to start a
|
||||
background task using the method that is compatible with the
|
||||
selected async mode.
|
||||
|
||||
:param target: the target function to execute.
|
||||
:param args: arguments to pass to the function.
|
||||
:param kwargs: keyword arguments to pass to the function.
|
||||
|
||||
This function returns an object compatible with the `Thread` class in
|
||||
the Python standard library. The `start()` method on this object is
|
||||
already called by this function.
|
||||
"""
|
||||
th = self._async['thread'](target=target, args=args, kwargs=kwargs)
|
||||
th.start()
|
||||
return th # pragma: no cover
|
||||
|
||||
def sleep(self, seconds=0):
|
||||
"""Sleep for the requested amount of time using the appropriate async
|
||||
model.
|
||||
|
||||
This is a utility function that applications can use to put a task to
|
||||
sleep without having to worry about using the correct call for the
|
||||
selected async mode.
|
||||
"""
|
||||
return self._async['sleep'](seconds)
|
||||
|
||||
def create_queue(self, *args, **kwargs):
|
||||
"""Create a queue object using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to create a queue
|
||||
without having to worry about using the correct call for the selected
|
||||
async mode.
|
||||
"""
|
||||
return self._async['queue'](*args, **kwargs)
|
||||
|
||||
def get_queue_empty_exception(self):
|
||||
"""Return the queue empty exception for the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to work with a
|
||||
queue without having to worry about using the correct call for the
|
||||
selected async mode.
|
||||
"""
|
||||
return self._async['queue_empty']
|
||||
|
||||
def create_event(self, *args, **kwargs):
|
||||
"""Create an event object using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to create an
|
||||
event without having to worry about using the correct call for the
|
||||
selected async mode.
|
||||
"""
|
||||
return self._async['event'](*args, **kwargs)
|
||||
|
||||
def _generate_id(self):
|
||||
"""Generate a unique session id."""
|
||||
return uuid.uuid4().hex
|
||||
|
||||
    def _handle_connect(self, environ, start_response, transport, b64=False,
                        jsonp_index=None):
        """Handle a client connection request.

        :param environ: the WSGI environment of the request.
        :param start_response: the WSGI ``start_response`` function.
        :param transport: the requested transport, ``'polling'`` or
                          ``'websocket'``.
        :param b64: ``True`` if binary payloads must be base64 encoded.
        :param jsonp_index: the JSONP callback index, or ``None``.
        """
        if self.start_service_task:
            # start the service task to monitor connected clients
            self.start_service_task = False
            self.start_background_task(self._service_task)

        # create the new session and register it
        sid = self._generate_id()
        s = socket.Socket(self, sid)
        self.sockets[sid] = s

        # the OPEN packet tells the client its session id and the
        # negotiated protocol parameters (times are in milliseconds)
        pkt = packet.Packet(
            packet.OPEN, {'sid': sid,
                          'upgrades': self._upgrades(sid, transport),
                          'pingTimeout': int(self.ping_timeout * 1000),
                          'pingInterval': int(self.ping_interval * 1000)})
        s.send(pkt)

        # give the application a chance to reject the connection
        ret = self._trigger_event('connect', sid, environ, run_async=False)
        if ret is False:
            del self.sockets[sid]
            self.logger.warning('Application rejected connection')
            return self._unauthorized()

        if transport == 'websocket':
            # hand the request over to the websocket handler; it blocks for
            # the duration of the connection
            ret = s.handle_get_request(environ, start_response)
            if s.closed:
                # websocket connection ended, so we are done
                del self.sockets[sid]
            return ret
        else:
            # polling transport: reply with the queued packets (at least the
            # OPEN packet sent above)
            s.connected = True
            headers = None
            if self.cookie:
                headers = [('Set-Cookie', self.cookie + '=' + sid)]
            try:
                return self._ok(s.poll(), headers=headers, b64=b64,
                                jsonp_index=jsonp_index)
            except exceptions.QueueEmpty:
                return self._bad_request()
|
||||
|
||||
def _upgrades(self, sid, transport):
|
||||
"""Return the list of possible upgrades for a client connection."""
|
||||
if not self.allow_upgrades or self._get_socket(sid).upgraded or \
|
||||
self._async['websocket'] is None or transport == 'websocket':
|
||||
return []
|
||||
return ['websocket']
|
||||
|
||||
def _trigger_event(self, event, *args, **kwargs):
|
||||
"""Invoke an event handler."""
|
||||
run_async = kwargs.pop('run_async', False)
|
||||
if event in self.handlers:
|
||||
if run_async:
|
||||
return self.start_background_task(self.handlers[event], *args)
|
||||
else:
|
||||
try:
|
||||
return self.handlers[event](*args)
|
||||
except:
|
||||
self.logger.exception(event + ' handler error')
|
||||
if event == 'connect':
|
||||
# if connect handler raised error we reject the
|
||||
# connection
|
||||
return False
|
||||
|
||||
def _get_socket(self, sid):
|
||||
"""Return the socket object for a given session."""
|
||||
try:
|
||||
s = self.sockets[sid]
|
||||
except KeyError:
|
||||
raise KeyError('Session not found')
|
||||
if s.closed:
|
||||
del self.sockets[sid]
|
||||
raise KeyError('Session is disconnected')
|
||||
return s
|
||||
|
||||
def _ok(self, packets=None, headers=None, b64=False, jsonp_index=None):
|
||||
"""Generate a successful HTTP response."""
|
||||
if packets is not None:
|
||||
if headers is None:
|
||||
headers = []
|
||||
if b64:
|
||||
headers += [('Content-Type', 'text/plain; charset=UTF-8')]
|
||||
else:
|
||||
headers += [('Content-Type', 'application/octet-stream')]
|
||||
return {'status': '200 OK',
|
||||
'headers': headers,
|
||||
'response': payload.Payload(packets=packets).encode(
|
||||
b64=b64, jsonp_index=jsonp_index)}
|
||||
else:
|
||||
return {'status': '200 OK',
|
||||
'headers': [('Content-Type', 'text/plain')],
|
||||
'response': b'OK'}
|
||||
|
||||
def _bad_request(self):
|
||||
"""Generate a bad request HTTP error response."""
|
||||
return {'status': '400 BAD REQUEST',
|
||||
'headers': [('Content-Type', 'text/plain')],
|
||||
'response': b'Bad Request'}
|
||||
|
||||
def _method_not_found(self):
|
||||
"""Generate a method not found HTTP error response."""
|
||||
return {'status': '405 METHOD NOT FOUND',
|
||||
'headers': [('Content-Type', 'text/plain')],
|
||||
'response': b'Method Not Found'}
|
||||
|
||||
def _unauthorized(self):
|
||||
"""Generate a unauthorized HTTP error response."""
|
||||
return {'status': '401 UNAUTHORIZED',
|
||||
'headers': [('Content-Type', 'text/plain')],
|
||||
'response': b'Unauthorized'}
|
||||
|
||||
def _cors_allowed_origins(self, environ):
|
||||
default_origins = []
|
||||
if 'wsgi.url_scheme' in environ and 'HTTP_HOST' in environ:
|
||||
default_origins.append('{scheme}://{host}'.format(
|
||||
scheme=environ['wsgi.url_scheme'], host=environ['HTTP_HOST']))
|
||||
if 'HTTP_X_FORWARDED_HOST' in environ:
|
||||
scheme = environ.get(
|
||||
'HTTP_X_FORWARDED_PROTO',
|
||||
environ['wsgi.url_scheme']).split(',')[0].strip()
|
||||
default_origins.append('{scheme}://{host}'.format(
|
||||
scheme=scheme, host=environ['HTTP_X_FORWARDED_HOST'].split(
|
||||
',')[0].strip()))
|
||||
if self.cors_allowed_origins is None:
|
||||
allowed_origins = default_origins
|
||||
elif self.cors_allowed_origins == '*':
|
||||
allowed_origins = None
|
||||
elif isinstance(self.cors_allowed_origins, six.string_types):
|
||||
allowed_origins = [self.cors_allowed_origins]
|
||||
else:
|
||||
allowed_origins = self.cors_allowed_origins
|
||||
return allowed_origins
|
||||
|
||||
def _cors_headers(self, environ):
|
||||
"""Return the cross-origin-resource-sharing headers."""
|
||||
if self.cors_allowed_origins == []:
|
||||
# special case, CORS handling is completely disabled
|
||||
return []
|
||||
headers = []
|
||||
allowed_origins = self._cors_allowed_origins(environ)
|
||||
if 'HTTP_ORIGIN' in environ and \
|
||||
(allowed_origins is None or environ['HTTP_ORIGIN'] in
|
||||
allowed_origins):
|
||||
headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
|
||||
if environ['REQUEST_METHOD'] == 'OPTIONS':
|
||||
headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')]
|
||||
if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ:
|
||||
headers += [('Access-Control-Allow-Headers',
|
||||
environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])]
|
||||
if self.cors_credentials:
|
||||
headers += [('Access-Control-Allow-Credentials', 'true')]
|
||||
return headers
|
||||
|
||||
def _gzip(self, response):
|
||||
"""Apply gzip compression to a response."""
|
||||
bytesio = six.BytesIO()
|
||||
with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
|
||||
gz.write(response)
|
||||
return bytesio.getvalue()
|
||||
|
||||
def _deflate(self, response):
|
||||
"""Apply deflate compression to a response."""
|
||||
return zlib.compress(response)
|
||||
|
||||
    def _service_task(self):  # pragma: no cover
        """Monitor connected clients and clean up those that time out.

        Runs forever in a background task; exits only on SystemExit or
        KeyboardInterrupt.
        """
        while True:
            if len(self.sockets) == 0:
                # nothing to do
                self.sleep(self.ping_timeout)
                continue

            # go through the entire client list in a ping interval cycle
            sleep_interval = float(self.ping_timeout) / len(self.sockets)

            try:
                # iterate over the current clients
                # (a copy is taken because sockets may be added or removed
                # while this loop sleeps between clients)
                for s in self.sockets.copy().values():
                    if not s.closing and not s.closed:
                        s.check_ping_timeout()
                    self.sleep(sleep_interval)
            except (SystemExit, KeyboardInterrupt):
                self.logger.info('service task canceled')
                break
            except:
                # an unexpected exception has occurred, log it and continue
                self.logger.exception('service task exception')
|
@ -0,0 +1,248 @@
|
||||
import six
|
||||
import sys
|
||||
import time
|
||||
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import payload
|
||||
|
||||
|
||||
class Socket(object):
    """An Engine.IO socket.

    One instance exists per connected client session. Outgoing packets are
    buffered in ``self.queue`` and drained by ``poll()`` (long-polling) or
    by the writer task of ``_websocket_handler()`` (websocket).
    """
    upgrade_protocols = ['websocket']

    def __init__(self, server, sid):
        self.server = server
        self.sid = sid
        # outgoing packet queue; a None item signals the reader to stop
        self.queue = self.server.create_queue()
        # timestamp of the last PING received, used for timeout detection
        self.last_ping = time.time()
        self.connected = False
        # while upgrading, outgoing packets go to packet_backlog instead of
        # the queue so they are not lost in the abandoned polling cycle
        self.upgrading = False
        self.upgraded = False
        self.packet_backlog = []
        self.closing = False
        self.closed = False
        # per-client user session dictionary
        self.session = {}

    def poll(self):
        """Wait for packets to send to the client.

        Blocks up to ``ping_timeout`` for the first packet, then drains any
        further queued packets without blocking. Returns an empty list when
        the close sentinel (None) is received.
        """
        queue_empty = self.server.get_queue_empty_exception()
        try:
            packets = [self.queue.get(timeout=self.server.ping_timeout)]
            self.queue.task_done()
        except queue_empty:
            raise exceptions.QueueEmpty()
        if packets == [None]:
            return []
        while True:
            try:
                packets.append(self.queue.get(block=False))
                self.queue.task_done()
            except queue_empty:
                break
        return packets

    def receive(self, pkt):
        """Receive packet from the client.

        Dispatches on the packet type; unknown types raise
        ``UnknownPacketError``.
        """
        packet_name = packet.packet_names[pkt.packet_type] \
            if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
        self.server.logger.info('%s: Received packet %s data %s',
                                self.sid, packet_name,
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')
        if pkt.packet_type == packet.PING:
            # record the ping time and echo the payload back as a PONG
            self.last_ping = time.time()
            self.send(packet.Packet(packet.PONG, pkt.data))
        elif pkt.packet_type == packet.MESSAGE:
            self.server._trigger_event('message', self.sid, pkt.data,
                                       run_async=self.server.async_handlers)
        elif pkt.packet_type == packet.UPGRADE:
            self.send(packet.Packet(packet.NOOP))
        elif pkt.packet_type == packet.CLOSE:
            self.close(wait=False, abort=True)
        else:
            raise exceptions.UnknownPacketError()

    def check_ping_timeout(self):
        """Make sure the client is still sending pings.

        This helps detect disconnections for long-polling clients.
        Returns ``False`` (after closing the socket) when the client has
        timed out, ``True`` otherwise.
        """
        if self.closed:
            raise exceptions.SocketIsClosedError()
        if time.time() - self.last_ping > self.server.ping_interval + \
                self.server.ping_interval_grace_period:
            self.server.logger.info('%s: Client is gone, closing socket',
                                    self.sid)
            # Passing abort=False here will cause close() to write a
            # CLOSE packet. This has the effect of updating half-open sockets
            # to their correct state of disconnected
            self.close(wait=False, abort=False)
            return False
        return True

    def send(self, pkt):
        """Send a packet to the client.

        The packet is dropped silently when the client has timed out. During
        a transport upgrade the packet is held in the backlog instead of the
        queue.
        """
        if not self.check_ping_timeout():
            return
        if self.upgrading:
            self.packet_backlog.append(pkt)
        else:
            self.queue.put(pkt)
        self.server.logger.info('%s: Sending packet %s data %s',
                                self.sid, packet.packet_names[pkt.packet_type],
                                pkt.data if not isinstance(pkt.data, bytes)
                                else '<binary>')

    def handle_get_request(self, environ, start_response):
        """Handle a long-polling GET request from the client.

        If the request carries websocket upgrade headers, control is handed
        to the matching ``_upgrade_*`` method instead.
        """
        connections = [
            s.strip()
            for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
        transport = environ.get('HTTP_UPGRADE', '').lower()
        if 'upgrade' in connections and transport in self.upgrade_protocols:
            self.server.logger.info('%s: Received request to upgrade to %s',
                                    self.sid, transport)
            return getattr(self, '_upgrade_' + transport)(environ,
                                                          start_response)
        try:
            packets = self.poll()
        except exceptions.QueueEmpty:
            # the polling wait timed out: close the socket and re-raise so
            # the server can report the error to the client
            exc = sys.exc_info()
            self.close(wait=False)
            six.reraise(*exc)
        return packets

    def handle_post_request(self, environ):
        """Handle a long-polling POST request from the client.

        The request body carries an encoded payload of one or more packets.
        """
        length = int(environ.get('CONTENT_LENGTH', '0'))
        if length > self.server.max_http_buffer_size:
            raise exceptions.ContentTooLongError()
        else:
            body = environ['wsgi.input'].read(length)
            p = payload.Payload(encoded_payload=body)
            for pkt in p.packets:
                self.receive(pkt)

    def close(self, wait=True, abort=False):
        """Close the socket connection.

        :param wait: when ``True``, block until the outgoing queue drains.
        :param abort: when ``True``, skip sending the CLOSE packet.
        """
        if not self.closed and not self.closing:
            self.closing = True
            self.server._trigger_event('disconnect', self.sid, run_async=False)
            if not abort:
                self.send(packet.Packet(packet.CLOSE))
            self.closed = True
            # the None sentinel unblocks any pending poll()
            self.queue.put(None)
            if wait:
                self.queue.join()

    def _upgrade_websocket(self, environ, start_response):
        """Upgrade the connection from polling to websocket."""
        if self.upgraded:
            raise IOError('Socket has been upgraded already')
        if self.server._async['websocket'] is None:
            # the selected async mode does not support websocket
            return self.server._bad_request()
        ws = self.server._async['websocket'](self._websocket_handler)
        return ws(environ, start_response)

    def _websocket_handler(self, ws):
        """Engine.IO handler for websocket transport.

        Performs the PING/PONG "probe" + UPGRADE handshake when this is an
        upgrade from polling, then runs a background writer task while this
        function reads incoming packets until the connection ends.
        """
        # try to set a socket timeout matching the configured ping interval
        for attr in ['_sock', 'socket']:  # pragma: no cover
            if hasattr(ws, attr) and hasattr(getattr(ws, attr), 'settimeout'):
                getattr(ws, attr).settimeout(self.server.ping_timeout)

        if self.connected:
            # the socket was already connected, so this is an upgrade
            self.upgrading = True  # hold packet sends during the upgrade

            # step 1: client sends a PING "probe", we answer with PONG
            pkt = ws.wait()
            decoded_pkt = packet.Packet(encoded_packet=pkt)
            if decoded_pkt.packet_type != packet.PING or \
                    decoded_pkt.data != 'probe':
                self.server.logger.info(
                    '%s: Failed websocket upgrade, no PING packet', self.sid)
                return []
            ws.send(packet.Packet(
                packet.PONG,
                data=six.text_type('probe')).encode(always_bytes=False))
            self.queue.put(packet.Packet(packet.NOOP))  # end poll

            # step 2: client confirms with an UPGRADE packet
            pkt = ws.wait()
            decoded_pkt = packet.Packet(encoded_packet=pkt)
            if decoded_pkt.packet_type != packet.UPGRADE:
                self.upgraded = False
                self.server.logger.info(
                    ('%s: Failed websocket upgrade, expected UPGRADE packet, '
                     'received %s instead.'),
                    self.sid, pkt)
                return []
            self.upgraded = True

            # flush any packets that were sent during the upgrade
            for pkt in self.packet_backlog:
                self.queue.put(pkt)
            self.packet_backlog = []
            self.upgrading = False
        else:
            # direct websocket connection, no polling phase to upgrade from
            self.connected = True
            self.upgraded = True

        # start separate writer thread
        def writer():
            while True:
                packets = None
                try:
                    packets = self.poll()
                except exceptions.QueueEmpty:
                    break
                if not packets:
                    # empty packet list returned -> connection closed
                    break
                try:
                    for pkt in packets:
                        ws.send(pkt.encode(always_bytes=False))
                except:
                    break
        writer_task = self.server.start_background_task(writer)

        self.server.logger.info(
            '%s: Upgrade to websocket successful', self.sid)

        # reader loop: runs until the client disconnects or an error occurs
        while True:
            p = None
            try:
                p = ws.wait()
            except Exception as e:
                # if the socket is already closed, we can assume this is a
                # downstream error of that
                if not self.closed:  # pragma: no cover
                    self.server.logger.info(
                        '%s: Unexpected error "%s", closing connection',
                        self.sid, str(e))
                break
            if p is None:
                # connection closed by client
                break
            if isinstance(p, six.text_type):  # pragma: no cover
                p = p.encode('utf-8')
            pkt = packet.Packet(encoded_packet=p)
            try:
                self.receive(pkt)
            except exceptions.UnknownPacketError:  # pragma: no cover
                pass
            except exceptions.SocketIsClosedError:  # pragma: no cover
                self.server.logger.info('Receive error -- socket is closed')
                break
            except:  # pragma: no cover
                # if we get an unexpected exception we log the error and exit
                # the connection properly
                self.server.logger.exception('Unknown receive error')
                break

        self.queue.put(None)  # unlock the writer task so that it can exit
        writer_task.join()
        self.close(wait=False, abort=True)

        return []
|
@ -0,0 +1,55 @@
|
||||
# Map of file extensions to the MIME types used when serving static files.
# Extensions not listed here fall back to 'application/octet-stream' in
# get_static_file().
content_types = {
    'css': 'text/css',
    'gif': 'image/gif',
    'html': 'text/html',
    'jpg': 'image/jpeg',
    'js': 'application/javascript',
    'json': 'application/json',
    'png': 'image/png',
    'txt': 'text/plain',
}
|
||||
|
||||
|
||||
def get_static_file(path, static_files):
    """Return the local filename and content type for the requested static
    file URL.

    :param path: the path portion of the requested URL.
    :param static_files: a static file configuration dictionary.

    This function returns a dictionary with two keys, "filename" and
    "content_type". If the requested URL does not match any static file, the
    return value is None.

    NOTE(review): the returned filename is built by string concatenation and
    is not checked against path traversal; callers must sanitize the URL.
    """
    if path in static_files:
        entry = static_files[path]
    else:
        entry = None
        # walk up the path one segment at a time, looking for a prefix that
        # is configured as a directory mapping
        suffix = ''
        while path != '':
            path, segment = path.rsplit('/', 1)
            suffix = '/' + segment + suffix
            if path in static_files:
                entry = static_files[path] + suffix
                break
            elif path + '/' in static_files:
                entry = static_files[path + '/'] + suffix[1:]
                break
    if entry:
        if isinstance(entry, str):
            entry = {'filename': entry}
        if entry['filename'].endswith('/'):
            # a directory was requested: append the configured root
            # document, or index.html when none is configured
            if '' in static_files:
                root = static_files['']
                if isinstance(root, str):
                    entry['filename'] += root
                else:
                    entry['filename'] += root['filename']
                    if 'content_type' in root:
                        entry['content_type'] = root['content_type']
            else:
                entry['filename'] += 'index.html'
        if 'content_type' not in entry:
            ext = entry['filename'].rsplit('.')[-1]
            entry['content_type'] = content_types.get(
                ext, 'application/octet-stream')
    return entry
|
@ -0,0 +1,922 @@
|
||||
from functools import wraps
|
||||
import os
|
||||
import sys
|
||||
|
||||
# make sure gevent-socketio is not installed, as it conflicts with
# python-socketio
# (both distribute a package named ``socketio``; ``socketio_manage`` only
# exists in the legacy gevent-socketio package, so a successful import means
# the wrong package is installed)
gevent_socketio_found = True
try:
    from socketio import socketio_manage
except ImportError:
    gevent_socketio_found = False
if gevent_socketio_found:
    print('The gevent-socketio package is incompatible with this version of '
          'the Flask-SocketIO extension. Please uninstall it, and then '
          'install the latest version of python-socketio in its place.')
    sys.exit(1)
|
||||
|
||||
import flask
|
||||
from flask import _request_ctx_stack, json as flask_json
|
||||
from flask.sessions import SessionMixin
|
||||
import socketio
|
||||
from socketio.exceptions import ConnectionRefusedError
|
||||
from werkzeug.debug import DebuggedApplication
|
||||
from werkzeug.serving import run_with_reloader
|
||||
|
||||
from .namespace import Namespace
|
||||
from .test_client import SocketIOTestClient
|
||||
|
||||
__version__ = '4.2.1'
|
||||
|
||||
|
||||
class _SocketIOMiddleware(socketio.WSGIApp):
    """WSGI middleware that records the Flask application instance in the
    WSGI environment under ``'flask.app'`` before delegating the request to
    the Socket.IO application.
    """
    def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
        self.flask_app = flask_app
        super(_SocketIOMiddleware, self).__init__(socketio_app,
                                                  flask_app.wsgi_app,
                                                  socketio_path=socketio_path)

    def __call__(self, environ, start_response):
        # work on a copy so the caller's environ dict is left untouched
        wsgi_environ = dict(environ)
        wsgi_environ['flask.app'] = self.flask_app
        return super(_SocketIOMiddleware, self).__call__(wsgi_environ,
                                                         start_response)
|
||||
|
||||
|
||||
class _ManagedSession(dict, SessionMixin):
    """This class is used for user sessions that are managed by
    Flask-SocketIO. It is a simple dict, expanded with the Flask session
    attributes provided by ``SessionMixin``."""
    pass
|
||||
|
||||
|
||||
class SocketIO(object):
    """Create a Flask-SocketIO server.

    :param app: The flask application instance. If the application instance
                isn't known at the time this class is instantiated, then call
                ``socketio.init_app(app)`` once the application instance is
                available.
    :param manage_session: If set to ``True``, this extension manages the user
                           session for Socket.IO events. If set to ``False``,
                           Flask's own session management is used. When using
                           Flask's cookie based sessions it is recommended that
                           you leave this set to the default of ``True``. When
                           using server-side sessions, a ``False`` setting
                           enables sharing the user session between HTTP routes
                           and Socket.IO events.
    :param message_queue: A connection URL for a message queue service the
                          server can use for multi-process communication. A
                          message queue is not required when using a single
                          server process.
    :param channel: The channel name, when using a message queue. If a channel
                    isn't specified, a default channel will be used. If
                    multiple clusters of SocketIO processes need to use the
                    same message queue without interfering with each other, then
                    each cluster should use a different channel.
    :param path: The path where the Socket.IO server is exposed. Defaults to
                 ``'socket.io'``. Leave this as is unless you know what you are
                 doing.
    :param resource: Alias to ``path``.
    :param kwargs: Socket.IO and Engine.IO server options.

    The Socket.IO server options are detailed below:

    :param client_manager: The client manager instance that will manage the
                           client list. When this is omitted, the client list
                           is stored in an in-memory structure, so the use of
                           multiple connected servers is not possible. In most
                           cases, this argument does not need to be set
                           explicitly.
    :param logger: To enable logging set to ``True`` or pass a logger object to
                   use. To disable logging set to ``False``. The default is
                   ``False``.
    :param binary: ``True`` to support binary payloads, ``False`` to treat all
                   payloads as text. On Python 2, if this is set to ``True``,
                   ``unicode`` values are treated as text, and ``str`` and
                   ``bytes`` values are treated as binary. This option has no
                   effect on Python 3, where text and binary payloads are
                   always automatically discovered.
    :param json: An alternative json module to use for encoding and decoding
                 packets. Custom json modules must have ``dumps`` and ``loads``
                 functions that are compatible with the standard library
                 versions. To use the same json encoder and decoder as a Flask
                 application, use ``flask.json``.
    :param async_handlers: If set to ``True``, event handlers for a client are
                           executed in separate threads. To run handlers for a
                           client synchronously, set to ``False``. The default
                           is ``True``.
    :param always_connect: When set to ``False``, new connections are
                           provisory until the connect handler returns
                           something other than ``False``, at which point they
                           are accepted. When set to ``True``, connections are
                           immediately accepted, and then if the connect
                           handler returns ``False`` a disconnect is issued.
                           Set to ``True`` if you need to emit events from the
                           connect handler and your client is confused when it
                           receives events before the connection acceptance.
                           In any other case use the default of ``False``.

    The Engine.IO server configuration supports the following settings:

    :param async_mode: The asynchronous model to use. See the Deployment
                       section in the documentation for a description of the
                       available options. Valid async modes are
                       ``threading``, ``eventlet``, ``gevent`` and
                       ``gevent_uwsgi``. If this argument is not given,
                       ``eventlet`` is tried first, then ``gevent_uwsgi``,
                       then ``gevent``, and finally ``threading``. The
                       first async mode that has all its dependencies installed
                       is the one that is chosen.
    :param ping_timeout: The time in seconds that the client waits for the
                         server to respond before disconnecting. The default is
                         60 seconds.
    :param ping_interval: The interval in seconds at which the client pings
                          the server. The default is 25 seconds.
    :param max_http_buffer_size: The maximum size of a message when using the
                                 polling transport. The default is 100,000,000
                                 bytes.
    :param allow_upgrades: Whether to allow transport upgrades or not. The
                           default is ``True``.
    :param http_compression: Whether to compress packages when using the
                             polling transport. The default is ``True``.
    :param compression_threshold: Only compress messages when their byte size
                                  is greater than this value. The default is
                                  1024 bytes.
    :param cookie: Name of the HTTP cookie that contains the client session
                   id. If set to ``None``, a cookie is not sent to the client.
                   The default is ``'io'``.
    :param cors_allowed_origins: Origin or list of origins that are allowed to
                                 connect to this server. Only the same origin
                                 is allowed by default. Set this argument to
                                 ``'*'`` to allow all origins, or to ``[]`` to
                                 disable CORS handling.
    :param cors_credentials: Whether credentials (cookies, authentication) are
                             allowed in requests to this server. The default is
                             ``True``.
    :param monitor_clients: If set to ``True``, a background task will ensure
                            inactive clients are closed. Set to ``False`` to
                            disable the monitoring task (not recommended). The
                            default is ``True``.
    :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
                            a logger object to use. To disable logging set to
                            ``False``. The default is ``False``.
    """

    def __init__(self, app=None, **kwargs):
        # underlying python-socketio Server, created in init_app()
        self.server = None
        self.server_options = {}
        self.wsgi_server = None
        # handlers registered before the server exists, replayed in init_app()
        self.handlers = []
        self.namespace_handlers = []
        # per-namespace error handlers, plus an optional catch-all default
        self.exception_handlers = {}
        self.default_exception_handler = None
        self.manage_session = True
        # We can call init_app when:
        # - we were given the Flask app instance (standard initialization)
        # - we were not given the app, but we were given a message_queue
        #   (standard initialization for auxiliary process)
        # In all other cases we collect the arguments and assume the client
        # will call init_app from an app factory function.
        if app is not None or 'message_queue' in kwargs:
            self.init_app(app, **kwargs)
        else:
            self.server_options.update(kwargs)

    def init_app(self, app, **kwargs):
        # Complete initialization against a Flask app (app-factory pattern).
        # ``app`` may be None for write-only auxiliary processes that only
        # post to the message queue.
        if app is not None:
            if not hasattr(app, 'extensions'):
                app.extensions = {}  # pragma: no cover
            app.extensions['socketio'] = self
        self.server_options.update(kwargs)
        self.manage_session = self.server_options.pop('manage_session',
                                                      self.manage_session)

        if 'client_manager' not in self.server_options:
            url = self.server_options.pop('message_queue', None)
            channel = self.server_options.pop('channel', 'flask-socketio')
            write_only = app is None
            if url:
                # pick the queue backend from the URL scheme; kombu is the
                # catch-all for anything it supports (amqp, etc.)
                if url.startswith(('redis://', "rediss://")):
                    queue_class = socketio.RedisManager
                elif url.startswith(('kafka://')):
                    # NOTE(review): ('kafka://') is a plain string, not a
                    # one-element tuple — str.startswith accepts both, so this
                    # works, but the parentheses are misleading.
                    queue_class = socketio.KafkaManager
                elif url.startswith('zmq'):
                    queue_class = socketio.ZmqManager
                else:
                    queue_class = socketio.KombuManager
                queue = queue_class(url, channel=channel,
                                    write_only=write_only)
                self.server_options['client_manager'] = queue

        if 'json' in self.server_options and \
                self.server_options['json'] == flask_json:
            # flask's json module is tricky to use because its output
            # changes when it is invoked inside or outside the app context
            # so here to prevent any ambiguities we replace it with wrappers
            # that ensure that the app context is always present
            class FlaskSafeJSON(object):
                @staticmethod
                def dumps(*args, **kwargs):
                    with app.app_context():
                        return flask_json.dumps(*args, **kwargs)

                @staticmethod
                def loads(*args, **kwargs):
                    with app.app_context():
                        return flask_json.loads(*args, **kwargs)

            self.server_options['json'] = FlaskSafeJSON

        # ``resource`` is a legacy alias for ``path``; normalize away any
        # leading slash since python-socketio expects a bare path
        resource = self.server_options.pop('path', None) or \
            self.server_options.pop('resource', None) or 'socket.io'
        if resource.startswith('/'):
            resource = resource[1:]
        if os.environ.get('FLASK_RUN_FROM_CLI'):
            # under ``flask run`` (Werkzeug dev server) only long-polling is
            # possible, so fall back to the threading async mode
            if self.server_options.get('async_mode') is None:
                if app is not None:
                    app.logger.warning(
                        'Flask-SocketIO is Running under Werkzeug, WebSocket '
                        'is not available.')
                self.server_options['async_mode'] = 'threading'
        self.server = socketio.Server(**self.server_options)
        self.async_mode = self.server.async_mode
        # replay any handlers registered before the server existed
        for handler in self.handlers:
            self.server.on(handler[0], handler[1], namespace=handler[2])
        for namespace_handler in self.namespace_handlers:
            self.server.register_namespace(namespace_handler)

        if app is not None:
            # here we attach the SocketIO middleware to the SocketIO object so
            # it can be referenced later if debug middleware needs to be
            # inserted
            self.sockio_mw = _SocketIOMiddleware(self.server, app,
                                                 socketio_path=resource)
            app.wsgi_app = self.sockio_mw

    def on(self, message, namespace=None):
        """Decorator to register a SocketIO event handler.

        This decorator must be applied to SocketIO event handlers. Example::

            @socketio.on('my event', namespace='/chat')
            def handle_my_custom_event(json):
                print('received json: ' + str(json))

        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        namespace = namespace or '/'

        def decorator(handler):
            # wrap the user handler so it runs inside a Flask request
            # context with the managed session installed (_handle_event)
            @wraps(handler)
            def _handler(sid, *args):
                return self._handle_event(handler, message, namespace, sid,
                                          *args)

            if self.server:
                self.server.on(message, _handler, namespace=namespace)
            else:
                # server not created yet; replayed later by init_app()
                self.handlers.append((message, _handler, namespace))
            return handler
        return decorator

    def on_error(self, namespace=None):
        """Decorator to define a custom error handler for SocketIO events.

        This decorator can be applied to a function that acts as an error
        handler for a namespace. This handler will be invoked when a SocketIO
        event handler raises an exception. The handler function must accept one
        argument, which is the exception raised. Example::

            @socketio.on_error(namespace='/chat')
            def chat_error_handler(e):
                print('An error has occurred: ' + str(e))

        :param namespace: The namespace for which to register the error
                          handler. Defaults to the global namespace.
        """
        namespace = namespace or '/'

        def decorator(exception_handler):
            if not callable(exception_handler):
                raise ValueError('exception_handler must be callable')
            self.exception_handlers[namespace] = exception_handler
            return exception_handler
        return decorator

    def on_error_default(self, exception_handler):
        """Decorator to define a default error handler for SocketIO events.

        This decorator can be applied to a function that acts as a default
        error handler for any namespaces that do not have a specific handler.
        Example::

            @socketio.on_error_default
            def error_handler(e):
                print('An error has occurred: ' + str(e))
        """
        if not callable(exception_handler):
            raise ValueError('exception_handler must be callable')
        self.default_exception_handler = exception_handler
        return exception_handler

    def on_event(self, message, handler, namespace=None):
        """Register a SocketIO event handler.

        ``on_event`` is the non-decorator version of ``'on'``.

        Example::

            def on_foo_event(json):
                print('received json: ' + str(json))

            socketio.on_event('my event', on_foo_event, namespace='/chat')

        :param message: The name of the event. This is normally a user defined
                        string, but a few event names are already defined. Use
                        ``'message'`` to define a handler that takes a string
                        payload, ``'json'`` to define a handler that takes a
                        JSON blob payload, ``'connect'`` or ``'disconnect'``
                        to create handlers for connection and disconnection
                        events.
        :param handler: The function that handles the event.
        :param namespace: The namespace on which the handler is to be
                          registered. Defaults to the global namespace.
        """
        self.on(message, namespace=namespace)(handler)

    def on_namespace(self, namespace_handler):
        # Register a class-based namespace handler (a Namespace subclass
        # instance); deferred until init_app() when the server is not yet
        # created.
        if not isinstance(namespace_handler, Namespace):
            raise ValueError('Not a namespace instance.')
        namespace_handler._set_socketio(self)
        if self.server:
            self.server.register_namespace(namespace_handler)
        else:
            self.namespace_handlers.append(namespace_handler)

    def emit(self, event, *args, **kwargs):
        """Emit a server generated SocketIO event.

        This function emits a SocketIO event to one or more connected clients.
        A JSON blob can be attached to the event as payload. This function can
        be used outside of a SocketIO event context, so it is appropriate to
        use when the server is the originator of an event, outside of any
        client context, such as in a regular HTTP request handler or a
        background task. Example::

            @app.route('/ping')
            def ping():
                socketio.emit('ping event', {'data': 42}, namespace='/chat')

        :param event: The name of the user event to emit.
        :param args: A dictionary with the JSON data to send as payload.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message to all the users in the given room. If
                     this parameter is not included, the event is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message. To skip multiple sids
                         pass a list.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        namespace = kwargs.pop('namespace', '/')
        room = kwargs.pop('room', None)
        include_self = kwargs.pop('include_self', True)
        skip_sid = kwargs.pop('skip_sid', None)
        if not include_self and not skip_sid:
            # exclude the sender; requires a request context to know the sid
            skip_sid = flask.request.sid
        callback = kwargs.pop('callback', None)
        if callback:
            # wrap the callback so that it sets the app and request contexts
            sid = flask.request.sid
            original_callback = callback

            def _callback_wrapper(*args):
                return self._handle_event(original_callback, None, namespace,
                                          sid, *args)

            callback = _callback_wrapper
        self.server.emit(event, *args, namespace=namespace, room=room,
                         skip_sid=skip_sid, callback=callback, **kwargs)

    def send(self, data, json=False, namespace=None, room=None,
             callback=None, include_self=True, skip_sid=None, **kwargs):
        """Send a server-generated SocketIO message.

        This function sends a simple SocketIO message to one or more connected
        clients. The message can be a string or a JSON blob. This is a simpler
        version of ``emit()``, which should be preferred. This function can be
        used outside of a SocketIO event context, so it is appropriate to use
        when the server is the originator of an event.

        :param data: The message to send, either a string or a JSON blob.
        :param json: ``True`` if ``message`` is a JSON blob, ``False``
                     otherwise.
        :param namespace: The namespace under which the message is to be sent.
                          Defaults to the global namespace.
        :param room: Send the message only to the users in the given room. If
                     this parameter is not included, the message is sent to
                     all connected users.
        :param skip_sid: The session id of a client to ignore when broadcasting
                         or addressing a room. This is typically set to the
                         originator of the message, so that everyone except
                         that client receive the message. To skip multiple sids
                         pass a list.
        :param callback: If given, this function will be called to acknowledge
                         that the client has received the message. The
                         arguments that will be passed to the function are
                         those provided by the client. Callback functions can
                         only be used when addressing an individual client.
        """
        # NOTE(review): when include_self is False this reads flask.request.sid
        # and therefore requires an active Socket.IO request context.
        skip_sid = flask.request.sid if not include_self else skip_sid
        if json:
            self.emit('json', data, namespace=namespace, room=room,
                      skip_sid=skip_sid, callback=callback, **kwargs)
        else:
            self.emit('message', data, namespace=namespace, room=room,
                      skip_sid=skip_sid, callback=callback, **kwargs)

    def close_room(self, room, namespace=None):
        """Close a room.

        This function removes any users that are in the given room and then
        deletes the room from the server. This function can be used outside
        of a SocketIO event context.

        :param room: The name of the room to close.
        :param namespace: The namespace under which the room exists. Defaults
                          to the global namespace.
        """
        self.server.close_room(room, namespace)

    def run(self, app, host=None, port=None, **kwargs):
        """Run the SocketIO web server.

        :param app: The Flask application instance.
        :param host: The hostname or IP address for the server to listen on.
                     Defaults to 127.0.0.1.
        :param port: The port number for the server to listen on. Defaults to
                     5000.
        :param debug: ``True`` to start the server in debug mode, ``False`` to
                      start in normal mode.
        :param use_reloader: ``True`` to enable the Flask reloader, ``False``
                             to disable it.
        :param extra_files: A list of additional files that the Flask
                            reloader should watch. Defaults to ``None``
        :param log_output: If ``True``, the server logs all incoming
                           connections. If ``False`` logging is disabled.
                           Defaults to ``True`` in debug mode, ``False``
                           in normal mode. Unused when the threading async
                           mode is used.
        :param kwargs: Additional web server options. The web server options
                       are specific to the server used in each of the supported
                       async modes. Note that options provided here will
                       not be seen when using an external web server such
                       as gunicorn, since this method is not called in that
                       case.
        """
        if host is None:
            host = '127.0.0.1'
        if port is None:
            # honor the port embedded in SERVER_NAME, if any
            server_name = app.config['SERVER_NAME']
            if server_name and ':' in server_name:
                port = int(server_name.rsplit(':', 1)[1])
            else:
                port = 5000

        debug = kwargs.pop('debug', app.debug)
        log_output = kwargs.pop('log_output', debug)
        use_reloader = kwargs.pop('use_reloader', debug)
        extra_files = kwargs.pop('extra_files', None)

        app.debug = debug
        if app.debug and self.server.eio.async_mode != 'threading':
            # put the debug middleware between the SocketIO middleware
            # and the Flask application instance
            #
            #    mw1    mw2    mw3   Flask app
            #     o ---- o ---- o ---- o
            #    /
            #   o Flask-SocketIO
            #    \  middleware
            #     o
            #  Flask-SocketIO WebSocket handler
            #
            # BECOMES
            #
            #  dbg-mw    mw1    mw2    mw3   Flask app
            #     o ---- o ---- o ---- o ---- o
            #    /
            #   o Flask-SocketIO
            #    \  middleware
            #     o
            #  Flask-SocketIO WebSocket handler
            #
            self.sockio_mw.wsgi_app = DebuggedApplication(self.sockio_mw.wsgi_app,
                                                          evalex=True)

        if self.server.eio.async_mode == 'threading':
            from werkzeug._internal import _log
            _log('warning', 'WebSocket transport not available. Install '
                            'eventlet or gevent and gevent-websocket for '
                            'improved performance.')
            app.run(host=host, port=port, threaded=True,
                    use_reloader=use_reloader, **kwargs)
        elif self.server.eio.async_mode == 'eventlet':
            def run_server():
                import eventlet
                import eventlet.wsgi
                import eventlet.green
                addresses = eventlet.green.socket.getaddrinfo(host, port)
                if not addresses:
                    raise RuntimeError('Could not resolve host to a valid address')
                eventlet_socket = eventlet.listen(addresses[0][4], addresses[0][0])

                # If provided an SSL argument, use an SSL socket
                ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
                            'ssl_version', 'ca_certs',
                            'do_handshake_on_connect', 'suppress_ragged_eofs',
                            'ciphers']
                ssl_params = {k: kwargs[k] for k in kwargs if k in ssl_args}
                if len(ssl_params) > 0:
                    # remove SSL options from kwargs so the WSGI server does
                    # not see them
                    for k in ssl_params:
                        kwargs.pop(k)
                    ssl_params['server_side'] = True  # Listening requires true
                    eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
                                                        **ssl_params)

                eventlet.wsgi.server(eventlet_socket, app,
                                     log_output=log_output, **kwargs)

            if use_reloader:
                run_with_reloader(run_server, extra_files=extra_files)
            else:
                run_server()
        elif self.server.eio.async_mode == 'gevent':
            from gevent import pywsgi
            try:
                from geventwebsocket.handler import WebSocketHandler
                websocket = True
            except ImportError:
                # gevent-websocket not installed; fall back to long-polling
                websocket = False

            log = 'default'
            if not log_output:
                log = None
            if websocket:
                self.wsgi_server = pywsgi.WSGIServer(
                    (host, port), app, handler_class=WebSocketHandler,
                    log=log, **kwargs)
            else:
                self.wsgi_server = pywsgi.WSGIServer((host, port), app,
                                                     log=log, **kwargs)

            if use_reloader:
                # monkey patching is required by the reloader
                from gevent import monkey
                monkey.patch_thread()
                monkey.patch_time()

                def run_server():
                    self.wsgi_server.serve_forever()

                run_with_reloader(run_server, extra_files=extra_files)
            else:
                self.wsgi_server.serve_forever()

    def stop(self):
        """Stop a running SocketIO web server.

        This method must be called from a HTTP or SocketIO handler function.
        """
        if self.server.eio.async_mode == 'threading':
            func = flask.request.environ.get('werkzeug.server.shutdown')
            if func:
                func()
            else:
                raise RuntimeError('Cannot stop unknown web server')
        elif self.server.eio.async_mode == 'eventlet':
            # SystemExit propagates out of the handler and stops the eventlet
            # server loop (it is intentionally caught by the bare except in
            # _handle_event only when an error handler is registered)
            raise SystemExit
        elif self.server.eio.async_mode == 'gevent':
            self.wsgi_server.stop()

    def start_background_task(self, target, *args, **kwargs):
        """Start a background task using the appropriate async model.

        This is a utility function that applications can use to start a
        background task using the method that is compatible with the
        selected async mode.

        :param target: the target function to execute.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.

        This function returns an object compatible with the `Thread` class in
        the Python standard library. The `start()` method on this object is
        already called by this function.
        """
        return self.server.start_background_task(target, *args, **kwargs)

    def sleep(self, seconds=0):
        """Sleep for the requested amount of time using the appropriate async
        model.

        This is a utility function that applications can use to put a task to
        sleep without having to worry about using the correct call for the
        selected async mode.
        """
        return self.server.sleep(seconds)

    def test_client(self, app, namespace=None, query_string=None,
                    headers=None, flask_test_client=None):
        """The Socket.IO test client is useful for testing a Flask-SocketIO
        server. It works in a similar way to the Flask Test Client, but
        adapted to the Socket.IO server.

        :param app: The Flask application instance.
        :param namespace: The namespace for the client. If not provided, the
                          client connects to the server on the global
                          namespace.
        :param query_string: A string with custom query string arguments.
        :param headers: A dictionary with custom HTTP headers.
        :param flask_test_client: The instance of the Flask test client
                                  currently in use. Passing the Flask test
                                  client is optional, but is necessary if you
                                  want the Flask user session and any other
                                  cookies set in HTTP routes accessible from
                                  Socket.IO events.
        """
        return SocketIOTestClient(app, self, namespace=namespace,
                                  query_string=query_string, headers=headers,
                                  flask_test_client=flask_test_client)

    def _handle_event(self, handler, message, namespace, sid, *args):
        # Run a user event handler inside a Flask request context built from
        # the WSGI environ captured at connection time, with the appropriate
        # session object installed.
        if sid not in self.server.environ:
            # we don't have record of this client, ignore this event
            return '', 400
        app = self.server.environ[sid]['flask.app']
        with app.request_context(self.server.environ[sid]):
            if self.manage_session:
                # manage a separate session for this client's Socket.IO events
                # created as a copy of the regular user session
                if 'saved_session' not in self.server.environ[sid]:
                    self.server.environ[sid]['saved_session'] = \
                        _ManagedSession(flask.session)
                session_obj = self.server.environ[sid]['saved_session']
            else:
                # let Flask handle the user session
                # for cookie based sessions, this effectively freezes the
                # session to its state at connection time
                # for server-side sessions, this allows HTTP and Socket.IO to
                # share the session, with both having read/write access to it
                session_obj = flask.session._get_current_object()
            _request_ctx_stack.top.session = session_obj
            flask.request.sid = sid
            flask.request.namespace = namespace
            flask.request.event = {'message': message, 'args': args}
            try:
                if message == 'connect':
                    # connect handlers take no arguments
                    ret = handler()
                else:
                    ret = handler(*args)
            except:
                # NOTE(review): bare except is deliberate here — it must also
                # catch SystemExit raised by stop() under eventlet so that a
                # registered error handler can observe it
                err_handler = self.exception_handlers.get(
                    namespace, self.default_exception_handler)
                if err_handler is None:
                    raise
                type, value, traceback = sys.exc_info()
                return err_handler(value)
            if not self.manage_session:
                # when Flask is managing the user session, it needs to save it
                if not hasattr(session_obj, 'modified') or session_obj.modified:
                    resp = app.response_class()
                    app.session_interface.save_session(app, session_obj, resp)
            return ret
|
||||
|
||||
|
||||
def emit(event, *args, **kwargs):
    """Emit a SocketIO event.

    This function emits a SocketIO event to one or more connected clients. A
    JSON blob can be attached to the event as payload. This is a function that
    can only be called from a SocketIO event handler, as it obtains some
    information from the current client context. Example::

        @socketio.on('my event')
        def handle_my_custom_event(json):
            emit('my response', {'data': 42})

    :param event: The name of the user event to emit.
    :param args: A dictionary with the JSON data to send as payload.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating event.
                      A ``'/'`` can be used to explicitly specify the global
                      namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all clients, or ``False``
                      to only reply to the sender of the originating event.
    :param room: Send the message to all the users in the given room. If this
                 argument is set, then broadcast is implied to be ``True``.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If
                         set to ``True``, the event is emitted to the
                         clients directly, without going through the queue.
                         This is more efficient, but only works when a
                         single server process is used, or when there is a
                         single addressee. It is recommended to always leave
                         this parameter with its default value of ``False``.
    """
    try:
        namespace = kwargs['namespace']
    except KeyError:
        # default to the namespace of the event currently being handled
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # neither broadcast nor an explicit room: address only the sender
        room = flask.request.sid

    extension = flask.current_app.extensions['socketio']
    return extension.emit(
        event, *args,
        namespace=namespace,
        room=room,
        include_self=kwargs.get('include_self', True),
        callback=callback,
        ignore_queue=kwargs.get('ignore_queue', False))
|
||||
|
||||
|
||||
def send(message, **kwargs):
    """Send a SocketIO message.

    This function sends a simple SocketIO message to one or more connected
    clients. The message can be a string or a JSON blob. This is a simpler
    version of ``emit()``, which should be preferred. This is a function that
    can only be called from a SocketIO event handler.

    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False``
                 otherwise.
    :param namespace: The namespace under which the message is to be sent.
                      Defaults to the namespace used by the originating event.
                      An empty string can be used to use the global namespace.
    :param callback: Callback function to invoke with the client's
                     acknowledgement.
    :param broadcast: ``True`` to send the message to all connected clients, or
                      ``False`` to only reply to the sender of the originating
                      event.
    :param room: Send the message to all the users in the given room.
    :param include_self: ``True`` to include the sender when broadcasting or
                         addressing a room, or ``False`` to send to everyone
                         but the sender.
    :param ignore_queue: Only used when a message queue is configured. If
                         set to ``True``, the event is emitted to the
                         clients directly, without going through the queue.
                         This is more efficient, but only works when a
                         single server process is used, or when there is a
                         single addressee. It is recommended to always leave
                         this parameter with its default value of ``False``.
    """
    try:
        namespace = kwargs['namespace']
    except KeyError:
        # default to the namespace of the event currently being handled
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    room = kwargs.get('room')
    if room is None and not kwargs.get('broadcast'):
        # neither broadcast nor an explicit room: address only the sender
        room = flask.request.sid

    extension = flask.current_app.extensions['socketio']
    return extension.send(
        message,
        json=kwargs.get('json', False),
        namespace=namespace,
        room=room,
        include_self=kwargs.get('include_self', True),
        callback=callback,
        ignore_queue=kwargs.get('ignore_queue', False))
|
||||
|
||||
|
||||
def join_room(room, sid=None, namespace=None):
|
||||
"""Join a room.
|
||||
|
||||
This function puts the user in a room, under the current namespace. The
|
||||
user and the namespace are obtained from the event context. This is a
|
||||
function that can only be called from a SocketIO event handler. Example::
|
||||
|
||||
@socketio.on('join')
|
||||
def on_join(data):
|
||||
username = session['username']
|
||||
room = data['room']
|
||||
join_room(room)
|
||||
send(username + ' has entered the room.', room=room)
|
||||
|
||||
:param room: The name of the room to join.
|
||||
:param sid: The session id of the client. If not provided, the client is
|
||||
obtained from the request context.
|
||||
:param namespace: The namespace for the room. If not provided, the
|
||||
namespace is obtained from the request context.
|
||||
"""
|
||||
socketio = flask.current_app.extensions['socketio']
|
||||
sid = sid or flask.request.sid
|
||||
namespace = namespace or flask.request.namespace
|
||||
socketio.server.enter_room(sid, room, namespace=namespace)
|
||||
|
||||
|
||||
def leave_room(room, sid=None, namespace=None):
|
||||
"""Leave a room.
|
||||
|
||||
This function removes the user from a room, under the current namespace.
|
||||
The user and the namespace are obtained from the event context. Example::
|
||||
|
||||
@socketio.on('leave')
|
||||
def on_leave(data):
|
||||
username = session['username']
|
||||
room = data['room']
|
||||
leave_room(room)
|
||||
send(username + ' has left the room.', room=room)
|
||||
|
||||
:param room: The name of the room to leave.
|
||||
:param sid: The session id of the client. If not provided, the client is
|
||||
obtained from the request context.
|
||||
:param namespace: The namespace for the room. If not provided, the
|
||||
namespace is obtained from the request context.
|
||||
"""
|
||||
socketio = flask.current_app.extensions['socketio']
|
||||
sid = sid or flask.request.sid
|
||||
namespace = namespace or flask.request.namespace
|
||||
socketio.server.leave_room(sid, room, namespace=namespace)
|
||||
|
||||
|
||||
def close_room(room, namespace=None):
|
||||
"""Close a room.
|
||||
|
||||
This function removes any users that are in the given room and then deletes
|
||||
the room from the server.
|
||||
|
||||
:param room: The name of the room to close.
|
||||
:param namespace: The namespace for the room. If not provided, the
|
||||
namespace is obtained from the request context.
|
||||
"""
|
||||
socketio = flask.current_app.extensions['socketio']
|
||||
namespace = namespace or flask.request.namespace
|
||||
socketio.server.close_room(room, namespace=namespace)
|
||||
|
||||
|
||||
def rooms(sid=None, namespace=None):
|
||||
"""Return a list of the rooms the client is in.
|
||||
|
||||
This function returns all the rooms the client has entered, including its
|
||||
own room, assigned by the Socket.IO server.
|
||||
|
||||
:param sid: The session id of the client. If not provided, the client is
|
||||
obtained from the request context.
|
||||
:param namespace: The namespace for the room. If not provided, the
|
||||
namespace is obtained from the request context.
|
||||
"""
|
||||
socketio = flask.current_app.extensions['socketio']
|
||||
sid = sid or flask.request.sid
|
||||
namespace = namespace or flask.request.namespace
|
||||
return socketio.server.rooms(sid, namespace=namespace)
|
||||
|
||||
|
||||
def disconnect(sid=None, namespace=None, silent=False):
|
||||
"""Disconnect the client.
|
||||
|
||||
This function terminates the connection with the client. As a result of
|
||||
this call the client will receive a disconnect event. Example::
|
||||
|
||||
@socketio.on('message')
|
||||
def receive_message(msg):
|
||||
if is_banned(session['username']):
|
||||
disconnect()
|
||||
else:
|
||||
# ...
|
||||
|
||||
:param sid: The session id of the client. If not provided, the client is
|
||||
obtained from the request context.
|
||||
:param namespace: The namespace for the room. If not provided, the
|
||||
namespace is obtained from the request context.
|
||||
:param silent: this option is deprecated.
|
||||
"""
|
||||
socketio = flask.current_app.extensions['socketio']
|
||||
sid = sid or flask.request.sid
|
||||
namespace = namespace or flask.request.namespace
|
||||
return socketio.server.disconnect(sid, namespace=namespace)
|
@ -0,0 +1,47 @@
|
||||
from socketio import Namespace as _Namespace
|
||||
|
||||
|
||||
class Namespace(_Namespace):
|
||||
def __init__(self, namespace=None):
|
||||
super(Namespace, self).__init__(namespace)
|
||||
self.socketio = None
|
||||
|
||||
def _set_socketio(self, socketio):
|
||||
self.socketio = socketio
|
||||
|
||||
def trigger_event(self, event, *args):
|
||||
"""Dispatch an event to the proper handler method.
|
||||
|
||||
In the most common usage, this method is not overloaded by subclasses,
|
||||
as it performs the routing of events to methods. However, this
|
||||
method can be overriden if special dispatching rules are needed, or if
|
||||
having a single method that catches all events is desired.
|
||||
"""
|
||||
handler_name = 'on_' + event
|
||||
if not hasattr(self, handler_name):
|
||||
# there is no handler for this event, so we ignore it
|
||||
return
|
||||
handler = getattr(self, handler_name)
|
||||
return self.socketio._handle_event(handler, event, self.namespace,
|
||||
*args)
|
||||
|
||||
def emit(self, event, data=None, room=None, include_self=True,
|
||||
namespace=None, callback=None):
|
||||
"""Emit a custom event to one or more connected clients."""
|
||||
return self.socketio.emit(event, data, room=room,
|
||||
include_self=include_self,
|
||||
namespace=namespace or self.namespace,
|
||||
callback=callback)
|
||||
|
||||
def send(self, data, room=None, include_self=True, namespace=None,
|
||||
callback=None):
|
||||
"""Send a message to one or more connected clients."""
|
||||
return self.socketio.send(data, room=room, include_self=include_self,
|
||||
namespace=namespace or self.namespace,
|
||||
callback=callback)
|
||||
|
||||
def close_room(self, room, namespace=None):
|
||||
"""Close a room."""
|
||||
return self.socketio.close_room(room=room,
|
||||
namespace=namespace or self.namespace)
|
||||
|
@ -0,0 +1,205 @@
|
||||
import uuid
|
||||
|
||||
from socketio import packet
|
||||
from socketio.pubsub_manager import PubSubManager
|
||||
from werkzeug.test import EnvironBuilder
|
||||
|
||||
|
||||
class SocketIOTestClient(object):
|
||||
"""
|
||||
This class is useful for testing a Flask-SocketIO server. It works in a
|
||||
similar way to the Flask Test Client, but adapted to the Socket.IO server.
|
||||
|
||||
:param app: The Flask application instance.
|
||||
:param socketio: The application's ``SocketIO`` instance.
|
||||
:param namespace: The namespace for the client. If not provided, the client
|
||||
connects to the server on the global namespace.
|
||||
:param query_string: A string with custom query string arguments.
|
||||
:param headers: A dictionary with custom HTTP headers.
|
||||
:param flask_test_client: The instance of the Flask test client
|
||||
currently in use. Passing the Flask test
|
||||
client is optional, but is necessary if you
|
||||
want the Flask user session and any other
|
||||
cookies set in HTTP routes accessible from
|
||||
Socket.IO events.
|
||||
"""
|
||||
queue = {}
|
||||
acks = {}
|
||||
|
||||
def __init__(self, app, socketio, namespace=None, query_string=None,
|
||||
headers=None, flask_test_client=None):
|
||||
def _mock_send_packet(sid, pkt):
|
||||
if pkt.packet_type == packet.EVENT or \
|
||||
pkt.packet_type == packet.BINARY_EVENT:
|
||||
if sid not in self.queue:
|
||||
self.queue[sid] = []
|
||||
if pkt.data[0] == 'message' or pkt.data[0] == 'json':
|
||||
self.queue[sid].append({'name': pkt.data[0],
|
||||
'args': pkt.data[1],
|
||||
'namespace': pkt.namespace or '/'})
|
||||
else:
|
||||
self.queue[sid].append({'name': pkt.data[0],
|
||||
'args': pkt.data[1:],
|
||||
'namespace': pkt.namespace or '/'})
|
||||
elif pkt.packet_type == packet.ACK or \
|
||||
pkt.packet_type == packet.BINARY_ACK:
|
||||
self.acks[sid] = {'args': pkt.data,
|
||||
'namespace': pkt.namespace or '/'}
|
||||
elif pkt.packet_type == packet.DISCONNECT:
|
||||
self.connected[pkt.namespace or '/'] = False
|
||||
|
||||
self.app = app
|
||||
self.flask_test_client = flask_test_client
|
||||
self.sid = uuid.uuid4().hex
|
||||
self.queue[self.sid] = []
|
||||
self.acks[self.sid] = None
|
||||
self.callback_counter = 0
|
||||
self.socketio = socketio
|
||||
self.connected = {}
|
||||
socketio.server._send_packet = _mock_send_packet
|
||||
socketio.server.environ[self.sid] = {}
|
||||
socketio.server.async_handlers = False # easier to test when
|
||||
socketio.server.eio.async_handlers = False # events are sync
|
||||
if isinstance(socketio.server.manager, PubSubManager):
|
||||
raise RuntimeError('Test client cannot be used with a message '
|
||||
'queue. Disable the queue on your test '
|
||||
'configuration.')
|
||||
socketio.server.manager.initialize()
|
||||
self.connect(namespace=namespace, query_string=query_string,
|
||||
headers=headers)
|
||||
|
||||
def is_connected(self, namespace=None):
|
||||
"""Check if a namespace is connected.
|
||||
|
||||
:param namespace: The namespace to check. The global namespace is
|
||||
assumed if this argument is not provided.
|
||||
"""
|
||||
return self.connected.get(namespace or '/', False)
|
||||
|
||||
def connect(self, namespace=None, query_string=None, headers=None):
|
||||
"""Connect the client.
|
||||
|
||||
:param namespace: The namespace for the client. If not provided, the
|
||||
client connects to the server on the global
|
||||
namespace.
|
||||
:param query_string: A string with custom query string arguments.
|
||||
:param headers: A dictionary with custom HTTP headers.
|
||||
|
||||
Note that it is usually not necessary to explicitly call this method,
|
||||
since a connection is automatically established when an instance of
|
||||
this class is created. An example where it this method would be useful
|
||||
is when the application accepts multiple namespace connections.
|
||||
"""
|
||||
url = '/socket.io'
|
||||
if query_string:
|
||||
if query_string[0] != '?':
|
||||
query_string = '?' + query_string
|
||||
url += query_string
|
||||
environ = EnvironBuilder(url, headers=headers).get_environ()
|
||||
environ['flask.app'] = self.app
|
||||
if self.flask_test_client:
|
||||
# inject cookies from Flask
|
||||
self.flask_test_client.cookie_jar.inject_wsgi(environ)
|
||||
self.connected['/'] = True
|
||||
if self.socketio.server._handle_eio_connect(
|
||||
self.sid, environ) is False:
|
||||
del self.connected['/']
|
||||
if namespace is not None and namespace != '/':
|
||||
self.connected[namespace] = True
|
||||
pkt = packet.Packet(packet.CONNECT, namespace=namespace)
|
||||
with self.app.app_context():
|
||||
if self.socketio.server._handle_eio_message(
|
||||
self.sid, pkt.encode()) is False:
|
||||
del self.connected[namespace]
|
||||
|
||||
def disconnect(self, namespace=None):
|
||||
"""Disconnect the client.
|
||||
|
||||
:param namespace: The namespace to disconnect. The global namespace is
|
||||
assumed if this argument is not provided.
|
||||
"""
|
||||
if not self.is_connected(namespace):
|
||||
raise RuntimeError('not connected')
|
||||
pkt = packet.Packet(packet.DISCONNECT, namespace=namespace)
|
||||
with self.app.app_context():
|
||||
self.socketio.server._handle_eio_message(self.sid, pkt.encode())
|
||||
del self.connected[namespace or '/']
|
||||
|
||||
def emit(self, event, *args, **kwargs):
|
||||
"""Emit an event to the server.
|
||||
|
||||
:param event: The event name.
|
||||
:param *args: The event arguments.
|
||||
:param callback: ``True`` if the client requests a callback, ``False``
|
||||
if not. Note that client-side callbacks are not
|
||||
implemented, a callback request will just tell the
|
||||
server to provide the arguments to invoke the
|
||||
callback, but no callback is invoked. Instead, the
|
||||
arguments that the server provided for the callback
|
||||
are returned by this function.
|
||||
:param namespace: The namespace of the event. The global namespace is
|
||||
assumed if this argument is not provided.
|
||||
"""
|
||||
namespace = kwargs.pop('namespace', None)
|
||||
if not self.is_connected(namespace):
|
||||
raise RuntimeError('not connected')
|
||||
callback = kwargs.pop('callback', False)
|
||||
id = None
|
||||
if callback:
|
||||
self.callback_counter += 1
|
||||
id = self.callback_counter
|
||||
pkt = packet.Packet(packet.EVENT, data=[event] + list(args),
|
||||
namespace=namespace, id=id)
|
||||
with self.app.app_context():
|
||||
encoded_pkt = pkt.encode()
|
||||
if isinstance(encoded_pkt, list):
|
||||
for epkt in encoded_pkt:
|
||||
self.socketio.server._handle_eio_message(self.sid, epkt)
|
||||
else:
|
||||
self.socketio.server._handle_eio_message(self.sid, encoded_pkt)
|
||||
ack = self.acks.pop(self.sid, None)
|
||||
if ack is not None:
|
||||
return ack['args'][0] if len(ack['args']) == 1 \
|
||||
else ack['args']
|
||||
|
||||
def send(self, data, json=False, callback=False, namespace=None):
|
||||
"""Send a text or JSON message to the server.
|
||||
|
||||
:param data: A string, dictionary or list to send to the server.
|
||||
:param json: ``True`` to send a JSON message, ``False`` to send a text
|
||||
message.
|
||||
:param callback: ``True`` if the client requests a callback, ``False``
|
||||
if not. Note that client-side callbacks are not
|
||||
implemented, a callback request will just tell the
|
||||
server to provide the arguments to invoke the
|
||||
callback, but no callback is invoked. Instead, the
|
||||
arguments that the server provided for the callback
|
||||
are returned by this function.
|
||||
:param namespace: The namespace of the event. The global namespace is
|
||||
assumed if this argument is not provided.
|
||||
"""
|
||||
if json:
|
||||
msg = 'json'
|
||||
else:
|
||||
msg = 'message'
|
||||
return self.emit(msg, data, callback=callback, namespace=namespace)
|
||||
|
||||
def get_received(self, namespace=None):
|
||||
"""Return the list of messages received from the server.
|
||||
|
||||
Since this is not a real client, any time the server emits an event,
|
||||
the event is simply stored. The test code can invoke this method to
|
||||
obtain the list of events that were received since the last call.
|
||||
|
||||
:param namespace: The namespace to get events from. The global
|
||||
namespace is assumed if this argument is not
|
||||
provided.
|
||||
"""
|
||||
if not self.is_connected(namespace):
|
||||
raise RuntimeError('not connected')
|
||||
namespace = namespace or '/'
|
||||
r = [pkt for pkt in self.queue[self.sid]
|
||||
if pkt['namespace'] == namespace]
|
||||
self.queue[self.sid] = [pkt for pkt in self.queue[self.sid]
|
||||
if pkt not in r]
|
||||
return r
|
@ -0,0 +1,38 @@
|
||||
import sys
|
||||
|
||||
from .client import Client
|
||||
from .base_manager import BaseManager
|
||||
from .pubsub_manager import PubSubManager
|
||||
from .kombu_manager import KombuManager
|
||||
from .redis_manager import RedisManager
|
||||
from .kafka_manager import KafkaManager
|
||||
from .zmq_manager import ZmqManager
|
||||
from .server import Server
|
||||
from .namespace import Namespace, ClientNamespace
|
||||
from .middleware import WSGIApp, Middleware
|
||||
from .tornado import get_tornado_handler
|
||||
if sys.version_info >= (3, 5): # pragma: no cover
|
||||
from .asyncio_client import AsyncClient
|
||||
from .asyncio_server import AsyncServer
|
||||
from .asyncio_manager import AsyncManager
|
||||
from .asyncio_namespace import AsyncNamespace, AsyncClientNamespace
|
||||
from .asyncio_redis_manager import AsyncRedisManager
|
||||
from .asyncio_aiopika_manager import AsyncAioPikaManager
|
||||
from .asgi import ASGIApp
|
||||
else: # pragma: no cover
|
||||
AsyncClient = None
|
||||
AsyncServer = None
|
||||
AsyncManager = None
|
||||
AsyncNamespace = None
|
||||
AsyncRedisManager = None
|
||||
AsyncAioPikaManager = None
|
||||
|
||||
__version__ = '4.4.0'
|
||||
|
||||
__all__ = ['__version__', 'Client', 'Server', 'BaseManager', 'PubSubManager',
|
||||
'KombuManager', 'RedisManager', 'ZmqManager', 'KafkaManager',
|
||||
'Namespace', 'ClientNamespace', 'WSGIApp', 'Middleware']
|
||||
if AsyncServer is not None: # pragma: no cover
|
||||
__all__ += ['AsyncClient', 'AsyncServer', 'AsyncNamespace',
|
||||
'AsyncClientNamespace', 'AsyncManager', 'AsyncRedisManager',
|
||||
'ASGIApp', 'get_tornado_handler', 'AsyncAioPikaManager']
|
@ -0,0 +1,36 @@
|
||||
import engineio
|
||||
|
||||
|
||||
class ASGIApp(engineio.ASGIApp): # pragma: no cover
|
||||
"""ASGI application middleware for Socket.IO.
|
||||
|
||||
This middleware dispatches traffic to an Socket.IO application. It can
|
||||
also serve a list of static files to the client, or forward unrelated
|
||||
HTTP traffic to another ASGI application.
|
||||
|
||||
:param socketio_server: The Socket.IO server. Must be an instance of the
|
||||
``socketio.AsyncServer`` class.
|
||||
:param static_files: A dictionary with static file mapping rules. See the
|
||||
documentation for details on this argument.
|
||||
:param other_asgi_app: A separate ASGI app that receives all other traffic.
|
||||
:param socketio_path: The endpoint where the Socket.IO application should
|
||||
be installed. The default value is appropriate for
|
||||
most cases.
|
||||
|
||||
Example usage::
|
||||
|
||||
import socketio
|
||||
import uvicorn
|
||||
|
||||
sio = socketio.AsyncServer()
|
||||
app = engineio.ASGIApp(sio, static_files={
|
||||
'/': 'index.html',
|
||||
'/static': './public',
|
||||
})
|
||||
uvicorn.run(app, host='127.0.0.1', port=5000)
|
||||
"""
|
||||
def __init__(self, socketio_server, other_asgi_app=None,
|
||||
static_files=None, socketio_path='socket.io'):
|
||||
super().__init__(socketio_server, other_asgi_app,
|
||||
static_files=static_files,
|
||||
engineio_path=socketio_path)
|
@ -0,0 +1,105 @@
|
||||
import asyncio
|
||||
import pickle
|
||||
|
||||
from socketio.asyncio_pubsub_manager import AsyncPubSubManager
|
||||
|
||||
try:
|
||||
import aio_pika
|
||||
except ImportError:
|
||||
aio_pika = None
|
||||
|
||||
|
||||
class AsyncAioPikaManager(AsyncPubSubManager): # pragma: no cover
|
||||
"""Client manager that uses aio_pika for inter-process messaging under
|
||||
asyncio.
|
||||
|
||||
This class implements a client manager backend for event sharing across
|
||||
multiple processes, using RabbitMQ
|
||||
|
||||
To use a aio_pika backend, initialize the :class:`Server` instance as
|
||||
follows::
|
||||
|
||||
url = 'amqp://user:password@hostname:port//'
|
||||
server = socketio.Server(client_manager=socketio.AsyncAioPikaManager(
|
||||
url))
|
||||
|
||||
:param url: The connection URL for the backend messaging queue. Example
|
||||
connection URLs are ``'amqp://guest:guest@localhost:5672//'``
|
||||
for RabbitMQ.
|
||||
:param channel: The channel name on which the server sends and receives
|
||||
notifications. Must be the same in all the servers.
|
||||
With this manager, the channel name is the exchange name
|
||||
in rabbitmq
|
||||
:param write_only: If set ot ``True``, only initialize to emit events. The
|
||||
default of ``False`` initializes the class for emitting
|
||||
and receiving.
|
||||
"""
|
||||
|
||||
name = 'asyncaiopika'
|
||||
|
||||
def __init__(self, url='amqp://guest:guest@localhost:5672//',
|
||||
channel='socketio', write_only=False, logger=None):
|
||||
if aio_pika is None:
|
||||
raise RuntimeError('aio_pika package is not installed '
|
||||
'(Run "pip install aio_pika" in your '
|
||||
'virtualenv).')
|
||||
self.url = url
|
||||
self.listener_connection = None
|
||||
self.listener_channel = None
|
||||
self.listener_queue = None
|
||||
super().__init__(channel=channel, write_only=write_only, logger=logger)
|
||||
|
||||
async def _connection(self):
|
||||
return await aio_pika.connect_robust(self.url)
|
||||
|
||||
async def _channel(self, connection):
|
||||
return await connection.channel()
|
||||
|
||||
async def _exchange(self, channel):
|
||||
return await channel.declare_exchange(self.channel,
|
||||
aio_pika.ExchangeType.FANOUT)
|
||||
|
||||
async def _queue(self, channel, exchange):
|
||||
queue = await channel.declare_queue(durable=False,
|
||||
arguments={'x-expires': 300000})
|
||||
await queue.bind(exchange)
|
||||
return queue
|
||||
|
||||
async def _publish(self, data):
|
||||
connection = await self._connection()
|
||||
channel = await self._channel(connection)
|
||||
exchange = await self._exchange(channel)
|
||||
await exchange.publish(
|
||||
aio_pika.Message(body=pickle.dumps(data),
|
||||
delivery_mode=aio_pika.DeliveryMode.PERSISTENT),
|
||||
routing_key='*'
|
||||
)
|
||||
|
||||
async def _listen(self):
|
||||
retry_sleep = 1
|
||||
while True:
|
||||
try:
|
||||
if self.listener_connection is None:
|
||||
self.listener_connection = await self._connection()
|
||||
self.listener_channel = await self._channel(
|
||||
self.listener_connection
|
||||
)
|
||||
await self.listener_channel.set_qos(prefetch_count=1)
|
||||
exchange = await self._exchange(self.listener_channel)
|
||||
self.listener_queue = await self._queue(
|
||||
self.listener_channel, exchange
|
||||
)
|
||||
|
||||
async with self.listener_queue.iterator() as queue_iter:
|
||||
async for message in queue_iter:
|
||||
with message.process():
|
||||
return pickle.loads(message.body)
|
||||
except Exception:
|
||||
self._get_logger().error('Cannot receive from rabbitmq... '
|
||||
'retrying in '
|
||||
'{} secs'.format(retry_sleep))
|
||||
self.listener_connection = None
|
||||
await asyncio.sleep(retry_sleep)
|
||||
retry_sleep *= 2
|
||||
if retry_sleep > 60:
|
||||
retry_sleep = 60
|
@ -0,0 +1,475 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import random
|
||||
|
||||
import engineio
|
||||
import six
|
||||
|
||||
from . import client
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
|
||||
default_logger = logging.getLogger('socketio.client')
|
||||
|
||||
|
||||
class AsyncClient(client.Client):
|
||||
"""A Socket.IO client for asyncio.
|
||||
|
||||
This class implements a fully compliant Socket.IO web client with support
|
||||
for websocket and long-polling transports.
|
||||
|
||||
:param reconnection: ``True`` if the client should automatically attempt to
|
||||
reconnect to the server after an interruption, or
|
||||
``False`` to not reconnect. The default is ``True``.
|
||||
:param reconnection_attempts: How many reconnection attempts to issue
|
||||
before giving up, or 0 for infinity attempts.
|
||||
The default is 0.
|
||||
:param reconnection_delay: How long to wait in seconds before the first
|
||||
reconnection attempt. Each successive attempt
|
||||
doubles this delay.
|
||||
:param reconnection_delay_max: The maximum delay between reconnection
|
||||
attempts.
|
||||
:param randomization_factor: Randomization amount for each delay between
|
||||
reconnection attempts. The default is 0.5,
|
||||
which means that each delay is randomly
|
||||
adjusted by +/- 50%.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``. The default is
|
||||
``False``.
|
||||
:param binary: ``True`` to support binary payloads, ``False`` to treat all
|
||||
payloads as text. On Python 2, if this is set to ``True``,
|
||||
``unicode`` values are treated as text, and ``str`` and
|
||||
``bytes`` values are treated as binary. This option has no
|
||||
effect on Python 3, where text and binary payloads are
|
||||
always automatically discovered.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
|
||||
The Engine.IO configuration supports the following settings:
|
||||
|
||||
:param request_timeout: A timeout in seconds for requests. The default is
|
||||
5 seconds.
|
||||
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
|
||||
skip SSL certificate verification, allowing
|
||||
connections to servers with self signed certificates.
|
||||
The default is ``True``.
|
||||
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
|
||||
a logger object to use. To disable logging set to
|
||||
``False``. The default is ``False``.
|
||||
"""
|
||||
def is_asyncio_based(self):
|
||||
return True
|
||||
|
||||
async def connect(self, url, headers={}, transports=None,
|
||||
namespaces=None, socketio_path='socket.io'):
|
||||
"""Connect to a Socket.IO server.
|
||||
|
||||
:param url: The URL of the Socket.IO server. It can include custom
|
||||
query string parameters if required by the server.
|
||||
:param headers: A dictionary with custom headers to send with the
|
||||
connection request.
|
||||
:param transports: The list of allowed transports. Valid transports
|
||||
are ``'polling'`` and ``'websocket'``. If not
|
||||
given, the polling transport is connected first,
|
||||
then an upgrade to websocket is attempted.
|
||||
:param namespaces: The list of custom namespaces to connect, in
|
||||
addition to the default namespace. If not given,
|
||||
the namespace list is obtained from the registered
|
||||
event handlers.
|
||||
:param socketio_path: The endpoint where the Socket.IO server is
|
||||
installed. The default value is appropriate for
|
||||
most cases.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
|
||||
Example usage::
|
||||
|
||||
sio = socketio.Client()
|
||||
sio.connect('http://localhost:5000')
|
||||
"""
|
||||
self.connection_url = url
|
||||
self.connection_headers = headers
|
||||
self.connection_transports = transports
|
||||
self.connection_namespaces = namespaces
|
||||
self.socketio_path = socketio_path
|
||||
|
||||
if namespaces is None:
|
||||
namespaces = set(self.handlers.keys()).union(
|
||||
set(self.namespace_handlers.keys()))
|
||||
elif isinstance(namespaces, six.string_types):
|
||||
namespaces = [namespaces]
|
||||
self.connection_namespaces = namespaces
|
||||
self.namespaces = [n for n in namespaces if n != '/']
|
||||
try:
|
||||
await self.eio.connect(url, headers=headers,
|
||||
transports=transports,
|
||||
engineio_path=socketio_path)
|
||||
except engineio.exceptions.ConnectionError as exc:
|
||||
six.raise_from(exceptions.ConnectionError(exc.args[0]), None)
|
||||
self.connected = True
|
||||
|
||||
async def wait(self):
|
||||
"""Wait until the connection with the server ends.
|
||||
|
||||
Client applications can use this function to block the main thread
|
||||
during the life of the connection.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
while True:
|
||||
await self.eio.wait()
|
||||
await self.sleep(1) # give the reconnect task time to start up
|
||||
if not self._reconnect_task:
|
||||
break
|
||||
await self._reconnect_task
|
||||
if self.eio.state != 'connected':
|
||||
break
|
||||
|
||||
async def emit(self, event, data=None, namespace=None, callback=None):
|
||||
"""Emit a custom event to one or more connected clients.
|
||||
|
||||
:param event: The event name. It can be any string. The event names
|
||||
``'connect'``, ``'message'`` and ``'disconnect'`` are
|
||||
reserved and should not be used.
|
||||
:param data: The data to send to the client or clients. Data can be of
|
||||
type ``str``, ``bytes``, ``list`` or ``dict``. If a
|
||||
``list`` or ``dict``, the data will be serialized as JSON.
|
||||
:param namespace: The Socket.IO namespace for the event. If this
|
||||
argument is omitted the event is emitted to the
|
||||
default namespace.
|
||||
:param callback: If given, this function will be called to acknowledge
|
||||
the the client has received the message. The arguments
|
||||
that will be passed to the function are those provided
|
||||
by the client. Callback functions can only be used
|
||||
when addressing an individual client.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
namespace = namespace or '/'
|
||||
if namespace != '/' and namespace not in self.namespaces:
|
||||
raise exceptions.BadNamespaceError(
|
||||
namespace + ' is not a connected namespace.')
|
||||
self.logger.info('Emitting event "%s" [%s]', event, namespace)
|
||||
if callback is not None:
|
||||
id = self._generate_ack_id(namespace, callback)
|
||||
else:
|
||||
id = None
|
||||
if six.PY2 and not self.binary:
|
||||
binary = False # pragma: nocover
|
||||
else:
|
||||
binary = None
|
||||
# tuples are expanded to multiple arguments, everything else is sent
|
||||
# as a single argument
|
||||
if isinstance(data, tuple):
|
||||
data = list(data)
|
||||
elif data is not None:
|
||||
data = [data]
|
||||
else:
|
||||
data = []
|
||||
await self._send_packet(packet.Packet(
|
||||
packet.EVENT, namespace=namespace, data=[event] + data, id=id,
|
||||
binary=binary))
|
||||
|
||||
async def send(self, data, namespace=None, callback=None):
|
||||
"""Send a message to one or more connected clients.
|
||||
|
||||
This function emits an event with the name ``'message'``. Use
|
||||
:func:`emit` to issue custom event names.
|
||||
|
||||
:param data: The data to send to the client or clients. Data can be of
|
||||
type ``str``, ``bytes``, ``list`` or ``dict``. If a
|
||||
``list`` or ``dict``, the data will be serialized as JSON.
|
||||
:param namespace: The Socket.IO namespace for the event. If this
|
||||
argument is omitted the event is emitted to the
|
||||
default namespace.
|
||||
:param callback: If given, this function will be called to acknowledge
|
||||
the the client has received the message. The arguments
|
||||
that will be passed to the function are those provided
|
||||
by the client. Callback functions can only be used
|
||||
when addressing an individual client.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
await self.emit('message', data=data, namespace=namespace,
|
||||
callback=callback)
|
||||
|
||||
    async def call(self, event, data=None, namespace=None, timeout=60):
        """Emit a custom event to a client and wait for the response.

        :param event: The event name. It can be any string. The event names
                      ``'connect'``, ``'message'`` and ``'disconnect'`` are
                      reserved and should not be used.
        :param data: The data to send to the client or clients. Data can be of
                     type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as JSON.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param timeout: The waiting timeout. If the timeout is reached before
                        the client acknowledges the event, then a
                        ``TimeoutError`` exception is raised.

        Note: this method is a coroutine.
        """
        # Event used to signal that the server acknowledged the emit.
        callback_event = self.eio.create_event()
        callback_args = []

        def event_callback(*args):
            # Capture the ack arguments and wake the waiting coroutine.
            callback_args.append(args)
            callback_event.set()

        await self.emit(event, data=data, namespace=namespace,
                        callback=event_callback)
        try:
            await asyncio.wait_for(callback_event.wait(), timeout)
        except asyncio.TimeoutError:
            # raise_from(..., None) suppresses the chained asyncio traceback
            # so callers see a clean TimeoutError.
            six.raise_from(exceptions.TimeoutError(), None)
        # Unpack the ack payload: several args -> tuple, one arg -> the
        # bare value, no args -> None.
        return callback_args[0] if len(callback_args[0]) > 1 \
            else callback_args[0][0] if len(callback_args[0]) == 1 \
            else None
|
||||
|
||||
async def disconnect(self):
|
||||
"""Disconnect from the server.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
# here we just request the disconnection
|
||||
# later in _handle_eio_disconnect we invoke the disconnect handler
|
||||
for n in self.namespaces:
|
||||
await self._send_packet(packet.Packet(packet.DISCONNECT,
|
||||
namespace=n))
|
||||
await self._send_packet(packet.Packet(
|
||||
packet.DISCONNECT, namespace='/'))
|
||||
self.connected = False
|
||||
await self.eio.disconnect(abort=True)
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
|
||||
"""Start a background task using the appropriate async model.
|
||||
|
||||
This is a utility function that applications can use to start a
|
||||
background task using the method that is compatible with the
|
||||
selected async mode.
|
||||
|
||||
:param target: the target function to execute.
|
||||
:param args: arguments to pass to the function.
|
||||
:param kwargs: keyword arguments to pass to the function.
|
||||
|
||||
This function returns an object compatible with the `Thread` class in
|
||||
the Python standard library. The `start()` method on this object is
|
||||
already called by this function.
|
||||
"""
|
||||
return self.eio.start_background_task(target, *args, **kwargs)
|
||||
|
||||
async def sleep(self, seconds=0):
|
||||
"""Sleep for the requested amount of time using the appropriate async
|
||||
model.
|
||||
|
||||
This is a utility function that applications can use to put a task to
|
||||
sleep without having to worry about using the correct call for the
|
||||
selected async mode.
|
||||
|
||||
Note: this method is a coroutine.
|
||||
"""
|
||||
return await self.eio.sleep(seconds)
|
||||
|
||||
async def _send_packet(self, pkt):
|
||||
"""Send a Socket.IO packet to the server."""
|
||||
encoded_packet = pkt.encode()
|
||||
if isinstance(encoded_packet, list):
|
||||
binary = False
|
||||
for ep in encoded_packet:
|
||||
await self.eio.send(ep, binary=binary)
|
||||
binary = True
|
||||
else:
|
||||
await self.eio.send(encoded_packet, binary=False)
|
||||
|
||||
    async def _handle_connect(self, namespace):
        """Handle a CONNECT packet from the server for one namespace."""
        namespace = namespace or '/'
        self.logger.info('Namespace {} is connected'.format(namespace))
        await self._trigger_event('connect', namespace=namespace)
        if namespace == '/':
            # The default namespace is up; now request connection for
            # every additional namespace the application asked for.
            for n in self.namespaces:
                await self._send_packet(packet.Packet(packet.CONNECT,
                                                      namespace=n))
        elif namespace not in self.namespaces:
            # Record a newly connected non-default namespace.
            self.namespaces.append(namespace)
|
||||
|
||||
    async def _handle_disconnect(self, namespace):
        """Handle a DISCONNECT packet from the server for one namespace."""
        if not self.connected:
            # Already fully disconnected; nothing to tear down.
            return
        namespace = namespace or '/'
        if namespace == '/':
            # Disconnecting the default namespace implies disconnecting
            # every other namespace as well.
            for n in self.namespaces:
                await self._trigger_event('disconnect', namespace=n)
            self.namespaces = []
        await self._trigger_event('disconnect', namespace=namespace)
        if namespace in self.namespaces:
            self.namespaces.remove(namespace)
        if namespace == '/':
            self.connected = False
|
||||
|
||||
    async def _handle_event(self, namespace, id, data):
        """Dispatch an EVENT packet and, when an id is given, send the ACK.

        :param namespace: the packet namespace, or ``None`` for the default.
        :param id: the ack id, or ``None`` when no ack was requested.
        :param data: list whose first element is the event name and whose
                     remaining elements are the event arguments.
        """
        namespace = namespace or '/'
        self.logger.info('Received event "%s" [%s]', data[0], namespace)
        r = await self._trigger_event(data[0], namespace, *data[1:])
        if id is not None:
            # send ACK packet with the response returned by the handler
            # tuples are expanded as multiple arguments
            if r is None:
                data = []
            elif isinstance(r, tuple):
                data = list(r)
            else:
                data = [r]
            # Under Python 2 with binary support disabled, force text
            # packets; otherwise let the packet decide from its payload.
            if six.PY2 and not self.binary:
                binary = False  # pragma: nocover
            else:
                binary = None
            await self._send_packet(packet.Packet(
                packet.ACK, namespace=namespace, id=id, data=data,
                binary=binary))
|
||||
|
||||
    async def _handle_ack(self, namespace, id, data):
        """Run the callback registered for an ACK packet, if any."""
        namespace = namespace or '/'
        self.logger.info('Received ack [%s]', namespace)
        callback = None
        try:
            callback = self.callbacks[namespace][id]
        except KeyError:
            # if we get an unknown callback we just ignore it
            self.logger.warning('Unknown callback received, ignoring.')
        else:
            # Callbacks fire at most once; drop it before invoking.
            del self.callbacks[namespace][id]
        if callback is not None:
            # Support both plain functions and coroutine callbacks.
            if asyncio.iscoroutinefunction(callback):
                await callback(*data)
            else:
                callback(*data)
|
||||
|
||||
    async def _handle_error(self, namespace, data):
        """Handle an ERROR packet: the server rejected a namespace."""
        namespace = namespace or '/'
        self.logger.info('Connection to namespace {} was rejected'.format(
            namespace))
        # Normalize the error payload to a tuple of handler arguments.
        if data is None:
            data = tuple()
        elif not isinstance(data, (tuple, list)):
            data = (data,)
        await self._trigger_event('connect_error', namespace, *data)
        if namespace in self.namespaces:
            self.namespaces.remove(namespace)
        if namespace == '/':
            # Rejection of the default namespace drops the whole
            # connection.
            self.namespaces = []
            self.connected = False
|
||||
|
||||
    async def _trigger_event(self, event, namespace, *args):
        """Invoke an application event handler.

        Returns the handler's return value, or ``None`` when no handler is
        registered for this event and namespace.
        """
        # first see if we have an explicit handler for the event
        if namespace in self.handlers and event in self.handlers[namespace]:
            if asyncio.iscoroutinefunction(self.handlers[namespace][event]):
                try:
                    ret = await self.handlers[namespace][event](*args)
                except asyncio.CancelledError:  # pragma: no cover
                    # A cancelled handler is treated as returning nothing.
                    ret = None
            else:
                ret = self.handlers[namespace][event](*args)
            return ret

        # or else, forward the event to a namespace handler if one exists
        elif namespace in self.namespace_handlers:
            return await self.namespace_handlers[namespace].trigger_event(
                event, *args)
|
||||
|
||||
    async def _handle_reconnect(self):
        """Background task that retries the connection with exponential
        backoff until it succeeds, is aborted, or the configured attempt
        limit is reached.
        """
        self._reconnect_abort.clear()
        # Register in the module-level list so pending reconnect tasks can
        # be aborted collectively.
        client.reconnecting_clients.append(self)
        attempt_count = 0
        current_delay = self.reconnection_delay
        while True:
            delay = current_delay
            current_delay *= 2
            if delay > self.reconnection_delay_max:
                delay = self.reconnection_delay_max
            # Apply +/- randomization_factor jitter so many clients do not
            # retry in lockstep.
            delay += self.randomization_factor * (2 * random.random() - 1)
            self.logger.info(
                'Connection failed, new attempt in {:.02f} seconds'.format(
                    delay))
            try:
                # Sleep for the backoff period, waking immediately if the
                # abort event is set in the meantime.
                await asyncio.wait_for(self._reconnect_abort.wait(), delay)
                self.logger.info('Reconnect task aborted')
                break
            except (asyncio.TimeoutError, asyncio.CancelledError):
                pass
            attempt_count += 1
            try:
                await self.connect(self.connection_url,
                                   headers=self.connection_headers,
                                   transports=self.connection_transports,
                                   namespaces=self.connection_namespaces,
                                   socketio_path=self.socketio_path)
            except (exceptions.ConnectionError, ValueError):
                # Failed attempt: fall through to the attempt-limit check.
                pass
            else:
                self.logger.info('Reconnection successful')
                self._reconnect_task = None
                break
            if self.reconnection_attempts and \
                    attempt_count >= self.reconnection_attempts:
                self.logger.info(
                    'Maximum reconnection attempts reached, giving up')
                break
        client.reconnecting_clients.remove(self)
|
||||
|
||||
def _handle_eio_connect(self):
|
||||
"""Handle the Engine.IO connection event."""
|
||||
self.logger.info('Engine.IO connection established')
|
||||
self.sid = self.eio.sid
|
||||
|
||||
    async def _handle_eio_message(self, data):
        """Dispatch Engine.IO messages."""
        if self._binary_packet:
            # A previous BINARY_EVENT/BINARY_ACK packet is waiting for its
            # attachments; this message is one of them.
            pkt = self._binary_packet
            if pkt.add_attachment(data):
                # All attachments received; dispatch the completed packet.
                self._binary_packet = None
                if pkt.packet_type == packet.BINARY_EVENT:
                    await self._handle_event(pkt.namespace, pkt.id, pkt.data)
                else:
                    await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
        else:
            pkt = packet.Packet(encoded_packet=data)
            if pkt.packet_type == packet.CONNECT:
                await self._handle_connect(pkt.namespace)
            elif pkt.packet_type == packet.DISCONNECT:
                await self._handle_disconnect(pkt.namespace)
            elif pkt.packet_type == packet.EVENT:
                await self._handle_event(pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.ACK:
                await self._handle_ack(pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.BINARY_EVENT or \
                    pkt.packet_type == packet.BINARY_ACK:
                # Hold the packet until its binary attachments arrive.
                self._binary_packet = pkt
            elif pkt.packet_type == packet.ERROR:
                await self._handle_error(pkt.namespace, pkt.data)
            else:
                raise ValueError('Unknown packet type.')
|
||||
|
||||
    async def _handle_eio_disconnect(self):
        """Handle the Engine.IO disconnection event."""
        self.logger.info('Engine.IO connection dropped')
        # Wake up and stop any reconnect task that may be sleeping.
        self._reconnect_abort.set()
        if self.connected:
            for n in self.namespaces:
                await self._trigger_event('disconnect', namespace=n)
            await self._trigger_event('disconnect', namespace='/')
            self.namespaces = []
            self.connected = False
        # Reset per-connection state.
        self.callbacks = {}
        self._binary_packet = None
        self.sid = None
        # NOTE(review): reconnection is attempted only when eio.state is
        # still 'connected' -- presumably that distinguishes an unexpected
        # drop from a requested disconnect; confirm against engineio.
        if self.eio.state == 'connected' and self.reconnection:
            self._reconnect_task = self.start_background_task(
                self._handle_reconnect)
|
||||
|
||||
    def _engineio_client_class(self):
        """Return the Engine.IO client class paired with this client."""
        return engineio.AsyncClient
|
@ -0,0 +1,58 @@
|
||||
import asyncio
|
||||
|
||||
from .base_manager import BaseManager
|
||||
|
||||
|
||||
class AsyncManager(BaseManager):
    """Manage a client list for an asyncio server."""
    async def emit(self, event, data, namespace, room=None, skip_sid=None,
                   callback=None, **kwargs):
        """Emit a message to a single client, a room, or all the clients
        connected to the namespace.

        Note: this method is a coroutine.
        """
        if namespace not in self.rooms or room not in self.rooms[namespace]:
            # Unknown namespace or room: nothing to deliver.
            return
        tasks = []
        if not isinstance(skip_sid, list):
            skip_sid = [skip_sid]
        for sid in self.get_participants(namespace, room):
            if sid not in skip_sid:
                if callback is not None:
                    id = self._generate_ack_id(sid, namespace, callback)
                else:
                    id = None
                # Wrap the coroutine in a Task: asyncio.wait() no longer
                # accepts bare coroutines as of Python 3.11.
                tasks.append(asyncio.ensure_future(
                    self.server._emit_internal(sid, event, data,
                                               namespace, id)))
        if tasks == []:  # pragma: no cover
            return
        await asyncio.wait(tasks)

    async def close_room(self, room, namespace):
        """Remove all participants from a room.

        Note: this method is a coroutine.
        """
        # The base implementation is synchronous; it is exposed as a
        # coroutine here only to keep the async interface uniform.
        return super().close_room(room, namespace)

    async def trigger_callback(self, sid, namespace, id, data):
        """Invoke an application callback.

        Note: this method is a coroutine.
        """
        callback = None
        try:
            callback = self.callbacks[sid][namespace][id]
        except KeyError:
            # if we get an unknown callback we just ignore it
            self._get_logger().warning('Unknown callback received, ignoring.')
        else:
            # Callbacks fire at most once; drop it before invoking.
            del self.callbacks[sid][namespace][id]
        if callback is not None:
            ret = callback(*data)
            # Plain callbacks return a value; coroutine callbacks return a
            # coroutine object that must be awaited here.
            if asyncio.iscoroutine(ret):
                try:
                    await ret
                except asyncio.CancelledError:  # pragma: no cover
                    pass
|
@ -0,0 +1,204 @@
|
||||
import asyncio
|
||||
|
||||
from socketio import namespace
|
||||
|
||||
|
||||
class AsyncNamespace(namespace.Namespace):
    """Base class for asyncio server-side class-based namespaces.

    A class-based namespace is a class that contains all the event handlers
    for a Socket.IO namespace. The event handlers are methods of the class
    with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
    ``on_message``, ``on_json``, and so on. These can be regular functions or
    coroutines.

    :param namespace: The Socket.IO namespace to be used with all the event
                      handlers defined in this class. If this argument is
                      omitted, the default namespace is used.
    """
    def is_asyncio_based(self):
        # Marks this namespace as compatible with the asyncio server.
        return True

    async def trigger_event(self, event, *args):
        """Dispatch an event to the proper handler method.

        In the most common usage, this method is not overloaded by subclasses,
        as it performs the routing of events to methods. However, this
        method can be overridden if special dispatching rules are needed, or
        if having a single method that catches all events is desired.

        Note: this method is a coroutine.
        """
        handler_name = 'on_' + event
        if hasattr(self, handler_name):
            handler = getattr(self, handler_name)
            # Handlers may be plain functions or coroutines.
            if asyncio.iscoroutinefunction(handler) is True:
                try:
                    ret = await handler(*args)
                except asyncio.CancelledError:  # pragma: no cover
                    # A cancelled handler is reported as returning nothing.
                    ret = None
            else:
                ret = handler(*args)
            return ret

    async def emit(self, event, data=None, room=None, skip_sid=None,
                   namespace=None, callback=None):
        """Emit a custom event to one or more connected clients.

        The only difference with the :func:`socketio.Server.emit` method is
        that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.emit(event, data=data, room=room,
                                      skip_sid=skip_sid,
                                      namespace=namespace or self.namespace,
                                      callback=callback)

    async def send(self, data, room=None, skip_sid=None, namespace=None,
                   callback=None):
        """Send a message to one or more connected clients.

        The only difference with the :func:`socketio.Server.send` method is
        that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.send(data, room=room, skip_sid=skip_sid,
                                      namespace=namespace or self.namespace,
                                      callback=callback)

    async def close_room(self, room, namespace=None):
        """Close a room.

        The only difference with the :func:`socketio.Server.close_room` method
        is that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.close_room(
            room, namespace=namespace or self.namespace)

    async def get_session(self, sid, namespace=None):
        """Return the user session for a client.

        The only difference with the :func:`socketio.Server.get_session`
        method is that when the ``namespace`` argument is not given the
        namespace associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.get_session(
            sid, namespace=namespace or self.namespace)

    async def save_session(self, sid, session, namespace=None):
        """Store the user session for a client.

        The only difference with the :func:`socketio.Server.save_session`
        method is that when the ``namespace`` argument is not given the
        namespace associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.save_session(
            sid, session, namespace=namespace or self.namespace)

    def session(self, sid, namespace=None):
        """Return the user session for a client with context manager syntax.

        The only difference with the :func:`socketio.Server.session` method is
        that when the ``namespace`` argument is not given the namespace
        associated with the class is used.
        """
        return self.server.session(sid, namespace=namespace or self.namespace)

    async def disconnect(self, sid, namespace=None):
        """Disconnect a client.

        The only difference with the :func:`socketio.Server.disconnect` method
        is that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.server.disconnect(
            sid, namespace=namespace or self.namespace)
|
||||
|
||||
|
||||
class AsyncClientNamespace(namespace.ClientNamespace):
    """Base class for asyncio client-side class-based namespaces.

    A class-based namespace is a class that contains all the event handlers
    for a Socket.IO namespace. The event handlers are methods of the class
    with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
    ``on_message``, ``on_json``, and so on. These can be regular functions or
    coroutines.

    :param namespace: The Socket.IO namespace to be used with all the event
                      handlers defined in this class. If this argument is
                      omitted, the default namespace is used.
    """
    def is_asyncio_based(self):
        # Marks this namespace as compatible with the asyncio client.
        return True

    async def trigger_event(self, event, *args):
        """Dispatch an event to the proper handler method.

        In the most common usage, this method is not overloaded by subclasses,
        as it performs the routing of events to methods. However, this
        method can be overridden if special dispatching rules are needed, or
        if having a single method that catches all events is desired.

        Note: this method is a coroutine.
        """
        handler_name = 'on_' + event
        if hasattr(self, handler_name):
            handler = getattr(self, handler_name)
            # Handlers may be plain functions or coroutines.
            if asyncio.iscoroutinefunction(handler) is True:
                try:
                    ret = await handler(*args)
                except asyncio.CancelledError:  # pragma: no cover
                    # A cancelled handler is reported as returning nothing.
                    ret = None
            else:
                ret = handler(*args)
            return ret

    async def emit(self, event, data=None, namespace=None, callback=None):
        """Emit a custom event to the server.

        The only difference with the :func:`socketio.Client.emit` method is
        that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.client.emit(event, data=data,
                                      namespace=namespace or self.namespace,
                                      callback=callback)

    async def send(self, data, namespace=None, callback=None):
        """Send a message to the server.

        The only difference with the :func:`socketio.Client.send` method is
        that when the ``namespace`` argument is not given the namespace
        associated with the class is used.

        Note: this method is a coroutine.
        """
        return await self.client.send(data,
                                      namespace=namespace or self.namespace,
                                      callback=callback)

    async def disconnect(self):
        """Disconnect the client.

        Delegates to :func:`socketio.Client.disconnect`. Unlike the other
        methods of this class there is no ``namespace`` argument:
        disconnecting always closes the whole connection.

        Note: this method is a coroutine.
        """
        return await self.client.disconnect()
|
@ -0,0 +1,163 @@
|
||||
from functools import partial
|
||||
import uuid
|
||||
|
||||
import json
|
||||
import pickle
|
||||
import six
|
||||
|
||||
from .asyncio_manager import AsyncManager
|
||||
|
||||
|
||||
class AsyncPubSubManager(AsyncManager):
    """Manage a client list attached to a pub/sub backend under asyncio.

    This is a base class that enables multiple servers to share the list of
    clients, with the servers communicating events through a pub/sub backend.
    The use of a pub/sub backend also allows any client connected to the
    backend to emit events addressed to Socket.IO clients.

    The actual backends must be implemented by subclasses, this class only
    provides a pub/sub generic framework for asyncio applications.

    :param channel: The channel name on which the server sends and receives
                    notifications.
    """
    name = 'asyncpubsub'

    def __init__(self, channel='socketio', write_only=False, logger=None):
        super().__init__()
        self.channel = channel
        # A write-only manager can publish events but never listens.
        self.write_only = write_only
        # Unique id of this host, used to route event callbacks back to
        # the server that issued them.
        self.host_id = uuid.uuid4().hex
        self.logger = logger

    def initialize(self):
        super().initialize()
        if not self.write_only:
            self.thread = self.server.start_background_task(self._thread)
        self._get_logger().info(self.name + ' backend initialized.')

    async def emit(self, event, data, namespace=None, room=None, skip_sid=None,
                   callback=None, **kwargs):
        """Emit a message to a single client, a room, or all the clients
        connected to the namespace.

        This method takes care of propagating the message to all the servers
        that are connected through the message queue.

        The parameters are the same as in :meth:`.Server.emit`.

        Note: this method is a coroutine.
        """
        if kwargs.get('ignore_queue'):
            # Bypass the queue and deliver only to locally connected clients.
            return await super().emit(
                event, data, namespace=namespace, room=room, skip_sid=skip_sid,
                callback=callback)
        namespace = namespace or '/'
        if callback is not None:
            # Callbacks cannot be sent over the queue; instead a
            # (room, namespace, id) triple identifies the callback so the
            # receiving host can route the ack back to this host.
            if self.server is None:
                raise RuntimeError('Callbacks can only be issued from the '
                                   'context of a server.')
            if room is None:
                raise ValueError('Cannot use callback without a room set.')
            id = self._generate_ack_id(room, namespace, callback)
            callback = (room, namespace, id)
        else:
            callback = None
        await self._publish({'method': 'emit', 'event': event, 'data': data,
                             'namespace': namespace, 'room': room,
                             'skip_sid': skip_sid, 'callback': callback,
                             'host_id': self.host_id})

    async def close_room(self, room, namespace=None):
        # Propagated through the queue so every server closes the room.
        await self._publish({'method': 'close_room', 'room': room,
                             'namespace': namespace or '/'})

    async def _publish(self, data):
        """Publish a message on the Socket.IO channel.

        This method needs to be implemented by the different subclasses that
        support pub/sub backends.
        """
        raise NotImplementedError('This method must be implemented in a '
                                  'subclass.')  # pragma: no cover

    async def _listen(self):
        """Return the next message published on the Socket.IO channel,
        blocking until a message is available.

        This method needs to be implemented by the different subclasses that
        support pub/sub backends.
        """
        raise NotImplementedError('This method must be implemented in a '
                                  'subclass.')  # pragma: no cover

    async def _handle_emit(self, message):
        # Events with callbacks are very tricky to handle across hosts
        # Here in the receiving end we set up a local callback that preserves
        # the callback host and id from the sender
        remote_callback = message.get('callback')
        remote_host_id = message.get('host_id')
        if remote_callback is not None and len(remote_callback) == 3:
            callback = partial(self._return_callback, remote_host_id,
                               *remote_callback)
        else:
            callback = None
        await super().emit(message['event'], message['data'],
                           namespace=message.get('namespace'),
                           room=message.get('room'),
                           skip_sid=message.get('skip_sid'),
                           callback=callback)

    async def _handle_callback(self, message):
        # Only the host that issued the original event runs the callback.
        if self.host_id == message.get('host_id'):
            try:
                sid = message['sid']
                namespace = message['namespace']
                id = message['id']
                args = message['args']
            except KeyError:
                return
            await self.trigger_callback(sid, namespace, id, args)

    async def _return_callback(self, host_id, sid, namespace, callback_id,
                               *args):
        # When an event callback is received, the callback is returned back
        # the sender, which is identified by the host_id
        await self._publish({'method': 'callback', 'host_id': host_id,
                             'sid': sid, 'namespace': namespace,
                             'id': callback_id, 'args': args})

    async def _handle_close_room(self, message):
        await super().close_room(
            room=message.get('room'), namespace=message.get('namespace'))

    async def _thread(self):
        """Background task: dispatch messages arriving from the backend."""
        while True:
            try:
                message = await self._listen()
            # A bare ``except:`` here would also swallow SystemExit and
            # (on Python >= 3.8) task cancellation; trap only real errors
            # raised by the backend listener.
            except Exception:
                import traceback
                traceback.print_exc()
                break
            data = None
            if isinstance(message, dict):
                data = message
            else:
                # Raw payloads may be pickled or JSON encoded; try both.
                if isinstance(message, six.binary_type):  # pragma: no cover
                    try:
                        data = pickle.loads(message)
                    except Exception:
                        pass
                if data is None:
                    try:
                        data = json.loads(message)
                    except Exception:
                        pass
            if data and 'method' in data:
                if data['method'] == 'emit':
                    await self._handle_emit(data)
                elif data['method'] == 'callback':
                    await self._handle_callback(data)
                elif data['method'] == 'close_room':
                    await self._handle_close_room(data)
|
@ -0,0 +1,107 @@
|
||||
import asyncio
|
||||
import pickle
|
||||
from urllib.parse import urlparse
|
||||
|
||||
try:
|
||||
import aioredis
|
||||
except ImportError:
|
||||
aioredis = None
|
||||
|
||||
from .asyncio_pubsub_manager import AsyncPubSubManager
|
||||
|
||||
|
||||
def _parse_redis_url(url):
|
||||
p = urlparse(url)
|
||||
if p.scheme not in {'redis', 'rediss'}:
|
||||
raise ValueError('Invalid redis url')
|
||||
ssl = p.scheme == 'rediss'
|
||||
host = p.hostname or 'localhost'
|
||||
port = p.port or 6379
|
||||
password = p.password
|
||||
if p.path:
|
||||
db = int(p.path[1:])
|
||||
else:
|
||||
db = 0
|
||||
return host, port, password, db, ssl
|
||||
|
||||
|
||||
class AsyncRedisManager(AsyncPubSubManager):  # pragma: no cover
    """Redis based client manager for asyncio servers.

    This class implements a Redis backend for event sharing across multiple
    processes. Only kept here as one more example of how to build a custom
    backend, since the kombu backend is perfectly adequate to support a Redis
    message queue.

    To use a Redis backend, initialize the :class:`Server` instance as
    follows::

        server = socketio.Server(client_manager=socketio.AsyncRedisManager(
            'redis://hostname:port/0'))

    :param url: The connection URL for the Redis server. For a default Redis
                store running on the same host, use ``redis://``. To use an
                SSL connection, use ``rediss://``.
    :param channel: The channel name on which the server sends and receives
                    notifications. Must be the same in all the servers.
    :param write_only: If set to ``True``, only initialize to emit events. The
                       default of ``False`` initializes the class for emitting
                       and receiving.
    """
    name = 'aioredis'

    def __init__(self, url='redis://localhost:6379/0', channel='socketio',
                 write_only=False, logger=None):
        # Fail early with a helpful message when the optional dependency
        # is missing.
        if aioredis is None:
            raise RuntimeError('Redis package is not installed '
                               '(Run "pip install aioredis" in your '
                               'virtualenv).')
        (
            self.host, self.port, self.password, self.db, self.ssl
        ) = _parse_redis_url(url)
        # Publisher and subscriber connections are created lazily on first
        # use, and reset to None on errors to force a reconnect.
        self.pub = None
        self.sub = None
        super().__init__(channel=channel, write_only=write_only, logger=logger)

    async def _publish(self, data):
        # One immediate retry with a fresh connection, then give up.
        retry = True
        while True:
            try:
                if self.pub is None:
                    # NOTE(review): aioredis.create_redis is the legacy
                    # (pre-2.0) aioredis API -- confirm the pinned aioredis
                    # version supports it.
                    self.pub = await aioredis.create_redis(
                        (self.host, self.port), db=self.db,
                        password=self.password, ssl=self.ssl
                    )
                return await self.pub.publish(self.channel,
                                              pickle.dumps(data))
            except (aioredis.RedisError, OSError):
                if retry:
                    self._get_logger().error('Cannot publish to redis... '
                                             'retrying')
                    self.pub = None
                    retry = False
                else:
                    self._get_logger().error('Cannot publish to redis... '
                                             'giving up')
                    break

    async def _listen(self):
        # Retry forever, with exponential backoff capped at 60 seconds.
        retry_sleep = 1
        while True:
            try:
                if self.sub is None:
                    self.sub = await aioredis.create_redis(
                        (self.host, self.port), db=self.db,
                        password=self.password, ssl=self.ssl
                    )
                    self.ch = (await self.sub.subscribe(self.channel))[0]
                return await self.ch.get()
            except (aioredis.RedisError, OSError):
                self._get_logger().error('Cannot receive from redis... '
                                         'retrying in '
                                         '{} secs'.format(retry_sleep))
                self.sub = None
                await asyncio.sleep(retry_sleep)
                retry_sleep *= 2
                if retry_sleep > 60:
                    retry_sleep = 60
|
@ -0,0 +1,526 @@
|
||||
import asyncio
|
||||
|
||||
import engineio
|
||||
import six
|
||||
|
||||
from . import asyncio_manager
|
||||
from . import exceptions
|
||||
from . import packet
|
||||
from . import server
|
||||
|
||||
|
||||
class AsyncServer(server.Server):
|
||||
"""A Socket.IO server for asyncio.
|
||||
|
||||
This class implements a fully compliant Socket.IO web server with support
|
||||
for websocket and long-polling transports, compatible with the asyncio
|
||||
framework on Python 3.5 or newer.
|
||||
|
||||
:param client_manager: The client manager instance that will manage the
|
||||
client list. When this is omitted, the client list
|
||||
is stored in an in-memory structure, so the use of
|
||||
multiple connected servers is not possible.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
:param async_handlers: If set to ``True``, event handlers are executed in
|
||||
separate threads. To run handlers synchronously,
|
||||
set to ``False``. The default is ``True``.
|
||||
:param kwargs: Connection parameters for the underlying Engine.IO server.
|
||||
|
||||
The Engine.IO configuration supports the following settings:
|
||||
|
||||
:param async_mode: The asynchronous model to use. See the Deployment
|
||||
section in the documentation for a description of the
|
||||
available options. Valid async modes are "aiohttp". If
|
||||
this argument is not given, an async mode is chosen
|
||||
based on the installed packages.
|
||||
:param ping_timeout: The time in seconds that the client waits for the
|
||||
server to respond before disconnecting.
|
||||
:param ping_interval: The interval in seconds at which the client pings
|
||||
the server.
|
||||
:param max_http_buffer_size: The maximum size of a message when using the
|
||||
polling transport.
|
||||
:param allow_upgrades: Whether to allow transport upgrades or not.
|
||||
:param http_compression: Whether to compress packages when using the
|
||||
polling transport.
|
||||
:param compression_threshold: Only compress messages when their byte size
|
||||
is greater than this value.
|
||||
:param cookie: Name of the HTTP cookie that contains the client session
|
||||
id. If set to ``None``, a cookie is not sent to the client.
|
||||
:param cors_allowed_origins: Origin or list of origins that are allowed to
|
||||
connect to this server. Only the same origin
|
||||
is allowed by default. Set this argument to
|
||||
``'*'`` to allow all origins, or to ``[]`` to
|
||||
disable CORS handling.
|
||||
:param cors_credentials: Whether credentials (cookies, authentication) are
|
||||
allowed in requests to this server.
|
||||
:param monitor_clients: If set to ``True``, a background task will ensure
|
||||
inactive clients are closed. Set to ``False`` to
|
||||
disable the monitoring task (not recommended). The
|
||||
default is ``True``.
|
||||
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
|
||||
a logger object to use. To disable logging set to
|
||||
``False``.
|
||||
"""
|
||||
    def __init__(self, client_manager=None, logger=False, json=None,
                 async_handlers=True, **kwargs):
        # Default to the in-memory asyncio-aware client manager when the
        # caller does not supply one.
        if client_manager is None:
            client_manager = asyncio_manager.AsyncManager()
        # Binary payload auto-detection is always used on Python 3, so
        # ``binary`` is pinned to False here.
        super().__init__(client_manager=client_manager, logger=logger,
                         binary=False, json=json,
                         async_handlers=async_handlers, **kwargs)
|
||||
|
||||
def is_asyncio_based(self):
|
||||
return True
|
||||
|
||||
    def attach(self, app, socketio_path='socket.io'):
        """Attach the Socket.IO server to an application.

        :param app: The application instance.
        :param socketio_path: The URL path where the Socket.IO endpoint is
                              installed. The default is ``'socket.io'``.
        """
        # Delegate to the Engine.IO server, which knows how to hook into
        # the supported async web frameworks.
        self.eio.attach(app, socketio_path)
|
||||
|
||||
    async def emit(self, event, data=None, to=None, room=None, skip_sid=None,
                   namespace=None, callback=None, **kwargs):
        """Emit a custom event to one or more connected clients.

        :param event: The event name. It can be any string. The event names
                      ``'connect'``, ``'message'`` and ``'disconnect'`` are
                      reserved and should not be used.
        :param data: The data to send to the client or clients. Data can be
                     of type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as
                     JSON.
        :param to: The recipient of the message. This can be set to the
                   session ID of a client to address only that client, or to
                   any custom room created by the application to address all
                   the clients in that room. If this argument is omitted the
                   event is broadcast to all connected clients.
        :param room: Alias for the ``to`` parameter.
        :param skip_sid: The session ID of a client to skip when broadcasting
                         to a room or to all clients. This can be used to
                         prevent a message from being sent to the sender.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param callback: If given, this function will be called to
                         acknowledge that the client has received the
                         message. The arguments that will be passed to the
                         function are those provided by the client. Callback
                         functions can only be used when addressing an
                         individual client.
        :param ignore_queue: Only used when a message queue is configured. If
                             set to ``True``, the event is emitted to the
                             clients directly, without going through the
                             queue. This is more efficient, but only works
                             when a single server process is used. It is
                             recommended to always leave this parameter with
                             its default value of ``False``.

        Note: this method is a coroutine.
        """
        namespace = namespace or '/'
        # ``to`` and ``room`` are synonyms; ``to`` wins when both are given.
        room = to or room
        self.logger.info('emitting event "%s" to %s [%s]', event,
                         room or 'all', namespace)
        await self.manager.emit(event, data, namespace, room=room,
                                skip_sid=skip_sid, callback=callback,
                                **kwargs)
|
||||
|
||||
    async def send(self, data, to=None, room=None, skip_sid=None,
                   namespace=None, callback=None, **kwargs):
        """Send a message to one or more connected clients.

        This function emits an event with the name ``'message'``. Use
        :func:`emit` to issue custom event names.

        :param data: The data to send to the client or clients. Data can be
                     of type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as
                     JSON.
        :param to: The recipient of the message. This can be set to the
                   session ID of a client to address only that client, or to
                   any custom room created by the application to address all
                   the clients in that room. If this argument is omitted the
                   event is broadcast to all connected clients.
        :param room: Alias for the ``to`` parameter.
        :param skip_sid: The session ID of a client to skip when broadcasting
                         to a room or to all clients. This can be used to
                         prevent a message from being sent to the sender.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param callback: If given, this function will be called to
                         acknowledge that the client has received the
                         message. The arguments that will be passed to the
                         function are those provided by the client. Callback
                         functions can only be used when addressing an
                         individual client.
        :param ignore_queue: Only used when a message queue is configured. If
                             set to ``True``, the event is emitted to the
                             clients directly, without going through the
                             queue. This is more efficient, but only works
                             when a single server process is used. It is
                             recommended to always leave this parameter with
                             its default value of ``False``.

        Note: this method is a coroutine.
        """
        # Thin wrapper: everything is forwarded to emit() under the
        # reserved 'message' event name.
        await self.emit('message', data=data, to=to, room=room,
                        skip_sid=skip_sid, namespace=namespace,
                        callback=callback, **kwargs)
|
||||
|
||||
    async def call(self, event, data=None, to=None, sid=None, namespace=None,
                   timeout=60, **kwargs):
        """Emit a custom event to a client and wait for the response.

        :param event: The event name. It can be any string. The event names
                      ``'connect'``, ``'message'`` and ``'disconnect'`` are
                      reserved and should not be used.
        :param data: The data to send to the client or clients. Data can be
                     of type ``str``, ``bytes``, ``list`` or ``dict``. If a
                     ``list`` or ``dict``, the data will be serialized as
                     JSON.
        :param to: The session ID of the recipient client.
        :param sid: Alias for the ``to`` parameter.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the event is emitted to the
                          default namespace.
        :param timeout: The waiting timeout. If the timeout is reached
                        before the client acknowledges the event, then a
                        ``TimeoutError`` exception is raised.
        :param ignore_queue: Only used when a message queue is configured. If
                             set to ``True``, the event is emitted to the
                             client directly, without going through the
                             queue. This is more efficient, but only works
                             when a single server process is used. It is
                             recommended to always leave this parameter with
                             its default value of ``False``.

        :raises RuntimeError: if the server runs event handlers
                              synchronously (``async_handlers=False``),
                              since waiting would then deadlock.

        Note: this method is a coroutine.
        """
        if not self.async_handlers:
            raise RuntimeError(
                'Cannot use call() when async_handlers is False.')
        # The acknowledgement callback records its arguments and sets the
        # event so this coroutine can resume.
        callback_event = self.eio.create_event()
        callback_args = []

        def event_callback(*args):
            callback_args.append(args)
            callback_event.set()

        await self.emit(event, data=data, room=to or sid, namespace=namespace,
                        callback=event_callback, **kwargs)
        try:
            await asyncio.wait_for(callback_event.wait(), timeout)
        except asyncio.TimeoutError:
            # raise_from(..., None) suppresses the asyncio.TimeoutError
            # context so callers only see this package's TimeoutError.
            six.raise_from(exceptions.TimeoutError(), None)
        # Unpack the acknowledgement: several args come back as a tuple, a
        # single arg is returned by itself, and no args return None.
        return callback_args[0] if len(callback_args[0]) > 1 \
            else callback_args[0][0] if len(callback_args[0]) == 1 \
            else None
|
||||
|
||||
    async def close_room(self, room, namespace=None):
        """Close a room.

        This function removes all the clients from the given room.

        :param room: Room name.
        :param namespace: The Socket.IO namespace for the event. If this
                          argument is omitted the default namespace is used.

        Note: this method is a coroutine.
        """
        namespace = namespace or '/'
        self.logger.info('room %s is closing [%s]', room, namespace)
        await self.manager.close_room(room, namespace)
|
||||
|
||||
    async def get_session(self, sid, namespace=None):
        """Return the user session for a client.

        :param sid: The session id of the client.
        :param namespace: The Socket.IO namespace. If this argument is
                          omitted the default namespace is used.

        The return value is a dictionary. Modifications made to this
        dictionary are not guaranteed to be preserved. If you want to modify
        the user session, use the ``session`` context manager instead.

        Note: this method is a coroutine.
        """
        namespace = namespace or '/'
        # Socket.IO sessions are stored per-namespace inside the single
        # Engine.IO session dictionary for the connection.
        eio_session = await self.eio.get_session(sid)
        return eio_session.setdefault(namespace, {})
|
||||
|
||||
    async def save_session(self, sid, session, namespace=None):
        """Store the user session for a client.

        :param sid: The session id of the client.
        :param session: The session dictionary.
        :param namespace: The Socket.IO namespace. If this argument is
                          omitted the default namespace is used.

        Note: this method is a coroutine.
        """
        namespace = namespace or '/'
        # Overwrite this namespace's slot in the Engine.IO session; the
        # Engine.IO session dictionary is mutated in place.
        eio_session = await self.eio.get_session(sid)
        eio_session[namespace] = session
|
||||
|
||||
    def session(self, sid, namespace=None):
        """Return the user session for a client with context manager syntax.

        :param sid: The session id of the client.
        :param namespace: The Socket.IO namespace. If this argument is
                          omitted the default namespace is used.

        This is a context manager that returns the user session dictionary
        for the client. Any changes that are made to this dictionary inside
        the context manager block are saved back to the session. Example
        usage::

            @sio.on('connect')
            async def on_connect(sid, environ):
                username = authenticate_user(environ)
                if not username:
                    return False
                async with sio.session(sid) as session:
                    session['username'] = username

            @sio.on('message')
            async def on_message(sid, msg):
                async with sio.session(sid) as session:
                    print('received message from ', session['username'])
        """
        class _session_context_manager(object):
            def __init__(self, server, sid, namespace):
                self.server = server
                # NOTE(review): ``self.sid`` is stored but the dunder
                # methods below resolve ``sid`` through the enclosing
                # closure instead.
                self.sid = sid
                self.namespace = namespace
                self.session = None

            async def __aenter__(self):
                # Fetch a fresh copy of the per-namespace session on entry.
                self.session = await self.server.get_session(
                    sid, namespace=self.namespace)
                return self.session

            async def __aexit__(self, *args):
                # Persist whatever the block did to the dictionary, even
                # when the block raised (no exception filtering here).
                await self.server.save_session(sid, self.session,
                                               namespace=self.namespace)

        return _session_context_manager(self, sid, namespace)
|
||||
|
||||
    async def disconnect(self, sid, namespace=None):
        """Disconnect a client.

        :param sid: Session ID of the client.
        :param namespace: The Socket.IO namespace to disconnect. If this
                          argument is omitted the default namespace is used.

        Note: this method is a coroutine.
        """
        namespace = namespace or '/'
        # Only act when the client is actually connected to the namespace.
        if self.manager.is_connected(sid, namespace=namespace):
            self.logger.info('Disconnecting %s [%s]', sid, namespace)
            # Mark the client as disconnecting so its data structures stay
            # available while the 'disconnect' handler runs.
            self.manager.pre_disconnect(sid, namespace=namespace)
            await self._send_packet(sid, packet.Packet(packet.DISCONNECT,
                                                       namespace=namespace))
            await self._trigger_event('disconnect', namespace, sid)
            self.manager.disconnect(sid, namespace=namespace)
            # Leaving the default namespace also closes the underlying
            # Engine.IO connection.
            if namespace == '/':
                await self.eio.disconnect(sid)
|
||||
|
||||
    async def handle_request(self, *args, **kwargs):
        """Handle an HTTP request from the client.

        This is the entry point of the Socket.IO application. This function
        returns the HTTP response body to deliver to the client.

        Note: this method is a coroutine.
        """
        # All protocol work happens in the Engine.IO layer; arguments are
        # framework-specific and passed through untouched.
        return await self.eio.handle_request(*args, **kwargs)
|
||||
|
||||
    def start_background_task(self, target, *args, **kwargs):
        """Start a background task using the appropriate async model.

        This is a utility function that applications can use to start a
        background task using the method that is compatible with the
        selected async mode.

        :param target: the target function to execute. Must be a coroutine.
        :param args: arguments to pass to the function.
        :param kwargs: keyword arguments to pass to the function.

        The return value is a ``asyncio.Task`` object.

        Note: this method is NOT a coroutine; the task is scheduled and the
        handle is returned synchronously.
        """
        return self.eio.start_background_task(target, *args, **kwargs)
|
||||
|
||||
    async def sleep(self, seconds=0):
        """Sleep for the requested amount of time using the appropriate async
        model.

        This is a utility function that applications can use to put a task to
        sleep without having to worry about using the correct call for the
        selected async mode.

        :param seconds: sleep duration in seconds. The default of 0 yields
                        control without a real delay.

        Note: this method is a coroutine.
        """
        return await self.eio.sleep(seconds)
|
||||
|
||||
async def _emit_internal(self, sid, event, data, namespace=None, id=None):
|
||||
"""Send a message to a client."""
|
||||
# tuples are expanded to multiple arguments, everything else is sent
|
||||
# as a single argument
|
||||
if isinstance(data, tuple):
|
||||
data = list(data)
|
||||
else:
|
||||
data = [data]
|
||||
await self._send_packet(sid, packet.Packet(
|
||||
packet.EVENT, namespace=namespace, data=[event] + data, id=id,
|
||||
binary=None))
|
||||
|
||||
async def _send_packet(self, sid, pkt):
|
||||
"""Send a Socket.IO packet to a client."""
|
||||
encoded_packet = pkt.encode()
|
||||
if isinstance(encoded_packet, list):
|
||||
binary = False
|
||||
for ep in encoded_packet:
|
||||
await self.eio.send(sid, ep, binary=binary)
|
||||
binary = True
|
||||
else:
|
||||
await self.eio.send(sid, encoded_packet, binary=False)
|
||||
|
||||
    async def _handle_connect(self, sid, namespace):
        """Handle a client connection request."""
        namespace = namespace or '/'
        self.manager.connect(sid, namespace)
        # In always_connect mode the CONNECT acknowledgement is sent before
        # the application's connect handler runs.
        if self.always_connect:
            await self._send_packet(sid, packet.Packet(packet.CONNECT,
                                                       namespace=namespace))
        fail_reason = None
        try:
            success = await self._trigger_event('connect', namespace, sid,
                                                self.environ[sid])
        except exceptions.ConnectionRefusedError as exc:
            # The handler refused the connection; its arguments are relayed
            # to the client as the failure payload.
            fail_reason = exc.error_args
            success = False

        # A handler returning exactly False (or raising above) rejects the
        # connection; any other return value accepts it.
        if success is False:
            if self.always_connect:
                # CONNECT was already sent, so a DISCONNECT must follow.
                self.manager.pre_disconnect(sid, namespace)
                await self._send_packet(sid, packet.Packet(
                    packet.DISCONNECT, data=fail_reason, namespace=namespace))
            self.manager.disconnect(sid, namespace)
            if not self.always_connect:
                # No CONNECT went out yet; report the rejection as an ERROR.
                await self._send_packet(sid, packet.Packet(
                    packet.ERROR, data=fail_reason, namespace=namespace))
            if sid in self.environ:  # pragma: no cover
                del self.environ[sid]
        elif not self.always_connect:
            await self._send_packet(sid, packet.Packet(packet.CONNECT,
                                                       namespace=namespace))
|
||||
|
||||
    async def _handle_disconnect(self, sid, namespace):
        """Handle a client disconnect."""
        namespace = namespace or '/'
        # Disconnecting from the default namespace drops the client from
        # every namespace it has joined.
        if namespace == '/':
            namespace_list = list(self.manager.get_namespaces())
        else:
            namespace_list = [namespace]
        # Non-default namespaces are handled first; '/' is deferred so the
        # connection-wide teardown happens last.
        for n in namespace_list:
            if n != '/' and self.manager.is_connected(sid, n):
                await self._trigger_event('disconnect', n, sid)
                self.manager.disconnect(sid, n)
        if namespace == '/' and self.manager.is_connected(sid, namespace):
            await self._trigger_event('disconnect', '/', sid)
            self.manager.disconnect(sid, '/')
|
||||
|
||||
    async def _handle_event(self, sid, namespace, id, data):
        """Handle an incoming client event.

        ``data[0]`` is the event name; the remaining items are the handler
        arguments. ``id`` is the optional acknowledgement id.
        """
        namespace = namespace or '/'
        self.logger.info('received event "%s" from %s [%s]', data[0], sid,
                         namespace)
        # Ignore events from clients that are not (or no longer) connected
        # to the namespace.
        if not self.manager.is_connected(sid, namespace):
            self.logger.warning('%s is not connected to namespace %s',
                                sid, namespace)
            return
        if self.async_handlers:
            # Run the handler in a background task so this coroutine can
            # keep servicing the connection.
            self.start_background_task(self._handle_event_internal, self, sid,
                                       data, namespace, id)
        else:
            await self._handle_event_internal(self, sid, data, namespace, id)
|
||||
|
||||
async def _handle_event_internal(self, server, sid, data, namespace, id):
|
||||
r = await server._trigger_event(data[0], namespace, sid, *data[1:])
|
||||
if id is not None:
|
||||
# send ACK packet with the response returned by the handler
|
||||
# tuples are expanded as multiple arguments
|
||||
if r is None:
|
||||
data = []
|
||||
elif isinstance(r, tuple):
|
||||
data = list(r)
|
||||
else:
|
||||
data = [r]
|
||||
await server._send_packet(sid, packet.Packet(packet.ACK,
|
||||
namespace=namespace,
|
||||
id=id, data=data,
|
||||
binary=None))
|
||||
|
||||
async def _handle_ack(self, sid, namespace, id, data):
|
||||
"""Handle ACK packets from the client."""
|
||||
namespace = namespace or '/'
|
||||
self.logger.info('received ack from %s [%s]', sid, namespace)
|
||||
await self.manager.trigger_callback(sid, namespace, id, data)
|
||||
|
||||
    async def _trigger_event(self, event, namespace, *args):
        """Invoke an application event handler.

        Returns the handler's return value, or ``None`` when no handler is
        registered for the event in the namespace.
        """
        # first see if we have an explicit handler for the event
        if namespace in self.handlers and event in self.handlers[namespace]:
            # Coroutine handlers are awaited; plain functions are called
            # directly.
            if asyncio.iscoroutinefunction(self.handlers[namespace][event]) \
                    is True:
                try:
                    ret = await self.handlers[namespace][event](*args)
                except asyncio.CancelledError:  # pragma: no cover
                    ret = None
            else:
                ret = self.handlers[namespace][event](*args)
            return ret

        # or else, forward the event to a namespace handler if one exists
        elif namespace in self.namespace_handlers:
            return await self.namespace_handlers[namespace].trigger_event(
                event, *args)
|
||||
|
||||
    async def _handle_eio_connect(self, sid, environ):
        """Handle the Engine.IO connection event."""
        # Lazily initialize the client manager on the first connection.
        if not self.manager_initialized:
            self.manager_initialized = True
            self.manager.initialize()
        self.environ[sid] = environ
        # Every new Engine.IO connection starts in the default namespace.
        return await self._handle_connect(sid, '/')
|
||||
|
||||
    async def _handle_eio_message(self, sid, data):
        """Dispatch Engine.IO messages.

        :raises ValueError: for ERROR packets or packets of unknown type.
        """
        if sid in self._binary_packet:
            # A BINARY_EVENT/BINARY_ACK header was seen earlier; this frame
            # is one of its attachments.
            pkt = self._binary_packet[sid]
            if pkt.add_attachment(data):
                # All attachments received; the packet is now complete.
                del self._binary_packet[sid]
                if pkt.packet_type == packet.BINARY_EVENT:
                    await self._handle_event(sid, pkt.namespace, pkt.id,
                                             pkt.data)
                else:
                    await self._handle_ack(sid, pkt.namespace, pkt.id,
                                           pkt.data)
        else:
            pkt = packet.Packet(encoded_packet=data)
            if pkt.packet_type == packet.CONNECT:
                await self._handle_connect(sid, pkt.namespace)
            elif pkt.packet_type == packet.DISCONNECT:
                await self._handle_disconnect(sid, pkt.namespace)
            elif pkt.packet_type == packet.EVENT:
                await self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.ACK:
                await self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
            elif pkt.packet_type == packet.BINARY_EVENT or \
                    pkt.packet_type == packet.BINARY_ACK:
                # Park the packet until its binary attachments arrive.
                self._binary_packet[sid] = pkt
            elif pkt.packet_type == packet.ERROR:
                raise ValueError('Unexpected ERROR packet.')
            else:
                raise ValueError('Unknown packet type.')
|
||||
|
||||
async def _handle_eio_disconnect(self, sid):
|
||||
"""Handle Engine.IO disconnect event."""
|
||||
await self._handle_disconnect(sid, '/')
|
||||
if sid in self.environ:
|
||||
del self.environ[sid]
|
||||
|
||||
def _engineio_server_class(self):
|
||||
return engineio.AsyncServer
|
@ -0,0 +1,178 @@
|
||||
import itertools
|
||||
import logging
|
||||
|
||||
import six
|
||||
|
||||
default_logger = logging.getLogger('socketio')


class BaseManager(object):
    """Manage client connections.

    This class keeps track of all the clients and the rooms they are in, to
    support the broadcasting of messages. The data used by this class is
    stored in a memory structure, making it appropriate only for single
    process services. More sophisticated storage backends can be implemented
    by subclasses.
    """
    def __init__(self):
        # Optional logger; see _get_logger() for the fallback chain.
        self.logger = None
        # The owning server, set later through set_server().
        self.server = None
        # rooms[namespace][room][sid] -> True. The ``None`` room holds
        # every client connected to the namespace; each client also gets a
        # personal room named after its sid.
        self.rooms = {}
        # callbacks[sid][namespace][id] -> callable; slot 0 is an
        # itertools.count used to generate ids.
        self.callbacks = {}
        # pending_disconnect[namespace] -> list of sids currently being
        # disconnected (their handlers are still running).
        self.pending_disconnect = {}

    def set_server(self, server):
        """Attach this manager to a server instance."""
        self.server = server

    def initialize(self):
        """Invoked before the first request is received. Subclasses can add
        their initialization code here.
        """
        pass

    def get_namespaces(self):
        """Return an iterable with the active namespace names."""
        # dict.keys() provides a lazy view, matching the previous
        # six.iterkeys() behavior on Python 3.
        return self.rooms.keys()

    def get_participants(self, namespace, room):
        """Return an iterable with the active participants in a room."""
        # Iterate over a copy so that participants may leave the room while
        # the generator is being consumed (e.g. during close_room()).
        for sid in self.rooms[namespace][room].copy():
            yield sid

    def connect(self, sid, namespace):
        """Register a client connection to a namespace."""
        self.enter_room(sid, namespace, None)
        self.enter_room(sid, namespace, sid)

    def is_connected(self, sid, namespace):
        """Return a true value when the client is connected to the
        namespace, a false value otherwise.
        """
        if namespace in self.pending_disconnect and \
                sid in self.pending_disconnect[namespace]:
            # the client is in the process of being disconnected
            return False
        try:
            return self.rooms[namespace][None][sid]
        except KeyError:
            # Unknown namespace or client: falls through and returns None.
            pass

    def pre_disconnect(self, sid, namespace):
        """Put the client in the to-be-disconnected list.

        This allows the client data structures to be present while the
        disconnect handler is invoked, but still recognize the fact that the
        client is soon going away.
        """
        if namespace not in self.pending_disconnect:
            self.pending_disconnect[namespace] = []
        self.pending_disconnect[namespace].append(sid)

    def disconnect(self, sid, namespace):
        """Register a client disconnect from a namespace."""
        if namespace not in self.rooms:
            return
        # Collect the rooms first; leave_room() mutates self.rooms.
        rooms = []
        for room_name, room in self.rooms[namespace].copy().items():
            if sid in room:
                rooms.append(room_name)
        for room in rooms:
            self.leave_room(sid, namespace, room)
        # Drop any pending acknowledgement callbacks for this client.
        if sid in self.callbacks and namespace in self.callbacks[sid]:
            del self.callbacks[sid][namespace]
            if len(self.callbacks[sid]) == 0:
                del self.callbacks[sid]
        # Clear the pending-disconnect marker set by pre_disconnect().
        if namespace in self.pending_disconnect and \
                sid in self.pending_disconnect[namespace]:
            self.pending_disconnect[namespace].remove(sid)
            if len(self.pending_disconnect[namespace]) == 0:
                del self.pending_disconnect[namespace]

    def enter_room(self, sid, namespace, room):
        """Add a client to a room."""
        if namespace not in self.rooms:
            self.rooms[namespace] = {}
        if room not in self.rooms[namespace]:
            self.rooms[namespace][room] = {}
        self.rooms[namespace][room][sid] = True

    def leave_room(self, sid, namespace, room):
        """Remove a client from a room."""
        try:
            del self.rooms[namespace][room][sid]
            # Prune empty rooms and namespaces so they do not accumulate.
            if len(self.rooms[namespace][room]) == 0:
                del self.rooms[namespace][room]
                if len(self.rooms[namespace]) == 0:
                    del self.rooms[namespace]
        except KeyError:
            pass

    def close_room(self, room, namespace):
        """Remove all participants from a room."""
        try:
            for sid in self.get_participants(namespace, room):
                self.leave_room(sid, namespace, room)
        except KeyError:
            pass

    def get_rooms(self, sid, namespace):
        """Return the rooms a client is in."""
        r = []
        try:
            for room_name, room in self.rooms[namespace].items():
                # The ``None`` room is the namespace-wide roster, not a
                # user-visible room.
                if room_name is not None and sid in room and room[sid]:
                    r.append(room_name)
        except KeyError:
            pass
        return r

    def emit(self, event, data, namespace, room=None, skip_sid=None,
             callback=None, **kwargs):
        """Emit a message to a single client, a room, or all the clients
        connected to the namespace.
        """
        if namespace not in self.rooms or room not in self.rooms[namespace]:
            return
        if not isinstance(skip_sid, list):
            skip_sid = [skip_sid]
        for sid in self.get_participants(namespace, room):
            if sid not in skip_sid:
                if callback is not None:
                    id = self._generate_ack_id(sid, namespace, callback)
                else:
                    id = None
                self.server._emit_internal(sid, event, data, namespace, id)

    def trigger_callback(self, sid, namespace, id, data):
        """Invoke an application callback."""
        callback = None
        try:
            callback = self.callbacks[sid][namespace][id]
        except KeyError:
            # if we get an unknown callback we just ignore it
            self._get_logger().warning('Unknown callback received, ignoring.')
        else:
            # Callbacks are single-shot: remove before invoking.
            del self.callbacks[sid][namespace][id]
        if callback is not None:
            callback(*data)

    def _generate_ack_id(self, sid, namespace, callback):
        """Generate a unique identifier for an ACK packet."""
        namespace = namespace or '/'
        if sid not in self.callbacks:
            self.callbacks[sid] = {}
        if namespace not in self.callbacks[sid]:
            # Slot 0 holds the id counter; real ids start at 1.
            self.callbacks[sid][namespace] = {0: itertools.count(1)}
        id = next(self.callbacks[sid][namespace][0])
        self.callbacks[sid][namespace][id] = callback
        return id

    def _get_logger(self):
        """Get the appropriate logger

        Prevents uninitialized servers in write-only mode from failing.
        """
        if self.logger:
            return self.logger
        elif self.server:
            return self.server.logger
        else:
            return default_logger
|
@ -0,0 +1,620 @@
|
||||
import itertools
|
||||
import logging
|
||||
import random
|
||||
import signal
|
||||
|
||||
import engineio
|
||||
import six
|
||||
|
||||
from . import exceptions
|
||||
from . import namespace
|
||||
from . import packet
|
||||
|
||||
default_logger = logging.getLogger('socketio.client')
|
||||
reconnecting_clients = []
|
||||
|
||||
|
||||
def signal_handler(sig, frame):  # pragma: no cover
    """SIGINT handler.

    Notify any clients that are in a reconnect loop to abort. Other
    disconnection tasks are handled at the engine.io level.
    """
    # Iterate over a copy: clients may remove themselves from the list as
    # their reconnect loops abort.
    for client in reconnecting_clients[:]:
        client._reconnect_abort.set()
    # Chain to whatever handler was installed before this module loaded.
    return original_signal_handler(sig, frame)


# Installed at import time; signal.signal() returns the previous handler so
# it can be chained from signal_handler() above.
original_signal_handler = signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
|
||||
class Client(object):
|
||||
"""A Socket.IO client.
|
||||
|
||||
This class implements a fully compliant Socket.IO web client with support
|
||||
for websocket and long-polling transports.
|
||||
|
||||
:param reconnection: ``True`` if the client should automatically attempt to
|
||||
reconnect to the server after an interruption, or
|
||||
``False`` to not reconnect. The default is ``True``.
|
||||
:param reconnection_attempts: How many reconnection attempts to issue
|
||||
before giving up, or 0 for infinity attempts.
|
||||
The default is 0.
|
||||
:param reconnection_delay: How long to wait in seconds before the first
|
||||
reconnection attempt. Each successive attempt
|
||||
doubles this delay.
|
||||
:param reconnection_delay_max: The maximum delay between reconnection
|
||||
attempts.
|
||||
:param randomization_factor: Randomization amount for each delay between
|
||||
reconnection attempts. The default is 0.5,
|
||||
which means that each delay is randomly
|
||||
adjusted by +/- 50%.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``. The default is
|
||||
``False``.
|
||||
:param binary: ``True`` to support binary payloads, ``False`` to treat all
|
||||
payloads as text. On Python 2, if this is set to ``True``,
|
||||
``unicode`` values are treated as text, and ``str`` and
|
||||
``bytes`` values are treated as binary. This option has no
|
||||
effect on Python 3, where text and binary payloads are
|
||||
always automatically discovered.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
|
||||
The Engine.IO configuration supports the following settings:
|
||||
|
||||
:param request_timeout: A timeout in seconds for requests. The default is
|
||||
5 seconds.
|
||||
:param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
|
||||
skip SSL certificate verification, allowing
|
||||
connections to servers with self signed certificates.
|
||||
The default is ``True``.
|
||||
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
|
||||
a logger object to use. To disable logging set to
|
||||
``False``. The default is ``False``.
|
||||
"""
|
||||
    def __init__(self, reconnection=True, reconnection_attempts=0,
                 reconnection_delay=1, reconnection_delay_max=5,
                 randomization_factor=0.5, logger=False, binary=False,
                 json=None, **kwargs):
        self.reconnection = reconnection
        self.reconnection_attempts = reconnection_attempts
        self.reconnection_delay = reconnection_delay
        self.reconnection_delay_max = reconnection_delay_max
        self.randomization_factor = randomization_factor
        self.binary = binary

        # Remaining keyword arguments are passed to the Engine.IO client;
        # ``engineio_logger`` is renamed to its ``logger`` option.
        engineio_options = kwargs
        engineio_logger = engineio_options.pop('engineio_logger', None)
        if engineio_logger is not None:
            engineio_options['logger'] = engineio_logger
        if json is not None:
            # NOTE: this assignment is a process-wide side effect shared by
            # all Packet instances, not just this client's.
            packet.Packet.json = json
            engineio_options['json'] = json

        self.eio = self._engineio_client_class()(**engineio_options)
        self.eio.on('connect', self._handle_eio_connect)
        self.eio.on('message', self._handle_eio_message)
        self.eio.on('disconnect', self._handle_eio_disconnect)

        if not isinstance(logger, bool):
            # A logger object was passed in; use it as-is.
            self.logger = logger
        else:
            self.logger = default_logger
            # Only configure the shared default logger when the application
            # has not configured logging itself.
            if not logging.root.handlers and \
                    self.logger.level == logging.NOTSET:
                if logger:
                    self.logger.setLevel(logging.INFO)
                else:
                    self.logger.setLevel(logging.ERROR)
                self.logger.addHandler(logging.StreamHandler())

        # Connection arguments, recorded on connect() for reconnections.
        self.connection_url = None
        self.connection_headers = None
        self.connection_transports = None
        self.connection_namespaces = None
        self.socketio_path = None
        self.sid = None

        self.connected = False
        self.namespaces = []
        self.handlers = {}
        self.namespace_handlers = {}
        self.callbacks = {}
        self._binary_packet = None
        self._reconnect_task = None
        self._reconnect_abort = self.eio.create_event()
|
||||
|
||||
def is_asyncio_based(self):
|
||||
return False
|
||||
|
||||
def on(self, event, handler=None, namespace=None):
    """Register an event handler.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param handler: The function that should be invoked to handle the
                    event. When this parameter is not given, the method
                    acts as a decorator for the handler function.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the handler is associated with
                      the default namespace.

    Example usage::

        # as a decorator:
        @sio.on('connect')
        def connect_handler():
            print('Connected!')

        # as a method:
        def message_handler(msg):
            print('Received message: ', msg)
            sio.send('response')
        sio.on('message', message_handler)

    The ``'connect'`` event handler receives no arguments. The
    ``'message'`` handler and handlers for custom event names receive the
    message payload as only argument. Any values returned from a message
    handler will be passed to the acknowledgement callback function if it
    exists. The ``'disconnect'`` handler does not take arguments.
    """
    namespace = namespace or '/'

    def register(fn):
        # file the handler under its namespace and event name
        self.handlers.setdefault(namespace, {})[event] = fn
        return fn

    if handler is None:
        # decorator usage: hand back the registering closure
        return register
    register(handler)
|
||||
|
||||
def event(self, *args, **kwargs):
    """Decorator to register an event handler.

    This is a simplified version of the ``on()`` method that takes the
    event name from the decorated function.

    Example usage::

        @sio.event
        def my_event(data):
            print('Received data: ', data)

    The above example is equivalent to::

        @sio.on('my_event')
        def my_event(data):
            print('Received data: ', data)

    A custom namespace can be given as an argument to the decorator::

        @sio.event(namespace='/test')
        def my_event(data):
            print('Received data: ', data)
    """
    if len(args) == 1 and not kwargs and callable(args[0]):
        # bare ``@sio.event`` usage: args[0] is the decorated function
        return self.on(args[0].__name__)(args[0])

    # ``@sio.event(...)`` usage: build and return a decorator that
    # forwards the given arguments to on()
    def decorate(fn):
        return self.on(fn.__name__, *args, **kwargs)(fn)

    return decorate
|
||||
|
||||
def register_namespace(self, namespace_handler):
    """Register a class-based namespace handler object.

    :param namespace_handler: An instance of a :class:`Namespace`
                              subclass that handles all the event traffic
                              for a namespace.

    Raises ``ValueError`` when the object is not a client namespace
    instance or does not match this client's async model.
    """
    if not isinstance(namespace_handler, namespace.ClientNamespace):
        raise ValueError('Not a namespace instance')
    if self.is_asyncio_based() != namespace_handler.is_asyncio_based():
        raise ValueError('Not a valid namespace class for this client')
    # give the handler a back-reference and index it by its namespace
    namespace_handler._set_client(self)
    self.namespace_handlers[namespace_handler.namespace] = namespace_handler
|
||||
|
||||
def connect(self, url, headers=None, transports=None,
            namespaces=None, socketio_path='socket.io'):
    """Connect to a Socket.IO server.

    :param url: The URL of the Socket.IO server. It can include custom
                query string parameters if required by the server.
    :param headers: A dictionary with custom headers to send with the
                    connection request. Defaults to no extra headers.
    :param transports: The list of allowed transports. Valid transports
                       are ``'polling'`` and ``'websocket'``. If not
                       given, the polling transport is connected first,
                       then an upgrade to websocket is attempted.
    :param namespaces: The list of custom namespaces to connect, in
                       addition to the default namespace. If not given,
                       the namespace list is obtained from the registered
                       event handlers.
    :param socketio_path: The endpoint where the Socket.IO server is
                          installed. The default value is appropriate for
                          most cases.

    Example usage::

        sio = socketio.Client()
        sio.connect('http://localhost:5000')
    """
    # BUG FIX: ``headers`` previously defaulted to the mutable literal
    # ``{}``, which is a single dict shared by every call (and stored on
    # self.connection_headers); use None as the sentinel instead.
    if headers is None:
        headers = {}
    # remember the arguments so the reconnect task can retry with them
    self.connection_url = url
    self.connection_headers = headers
    self.connection_transports = transports
    self.connection_namespaces = namespaces
    self.socketio_path = socketio_path

    if namespaces is None:
        # connect every namespace that has a registered handler
        namespaces = set(self.handlers.keys()).union(
            set(self.namespace_handlers.keys()))
    elif isinstance(namespaces, six.string_types):
        namespaces = [namespaces]
    self.connection_namespaces = namespaces
    self.namespaces = [n for n in namespaces if n != '/']
    try:
        self.eio.connect(url, headers=headers, transports=transports,
                         engineio_path=socketio_path)
    except engineio.exceptions.ConnectionError as exc:
        # re-raise as this package's ConnectionError, suppressing the
        # engineio exception from the chained traceback
        six.raise_from(exceptions.ConnectionError(exc.args[0]), None)
    self.connected = True
|
||||
|
||||
def wait(self):
    """Wait until the connection with the server ends.

    Client applications can use this function to block the main thread
    during the life of the connection.
    """
    while True:
        self.eio.wait()
        self.sleep(1)  # give the reconnect task time to start up
        if not self._reconnect_task:
            # no reconnection in progress: the connection has ended
            break
        self._reconnect_task.join()
        if self.eio.state != 'connected':
            # reconnection did not succeed; stop waiting
            break
|
||||
|
||||
def emit(self, event, data=None, namespace=None, callback=None):
    """Emit a custom event to the server.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param data: The data to send to the server. Data can be of
                 type ``str``, ``bytes``, ``list`` or ``dict``. If a
                 ``list`` or ``dict``, the data will be serialized as JSON.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the event is emitted to the
                      default namespace.
    :param callback: If given, this function will be called to acknowledge
                     that the server has received the message. The
                     arguments that will be passed to the function are
                     those provided by the server.

    :raises BadNamespaceError: if the target namespace is not connected.
    """
    namespace = namespace or '/'
    if namespace != '/' and namespace not in self.namespaces:
        raise exceptions.BadNamespaceError(
            namespace + ' is not a connected namespace.')
    self.logger.info('Emitting event "%s" [%s]', event, namespace)
    if callback is not None:
        # reserve an id so the server's ACK can be routed back to callback
        id = self._generate_ack_id(namespace, callback)
    else:
        id = None
    # self.binary is presumably the constructor's binary setting --
    # not visible in this chunk, confirm against __init__
    if six.PY2 and not self.binary:
        binary = False  # pragma: nocover
    else:
        binary = None
    # tuples are expanded to multiple arguments, everything else is sent
    # as a single argument
    if isinstance(data, tuple):
        data = list(data)
    elif data is not None:
        data = [data]
    else:
        data = []
    self._send_packet(packet.Packet(packet.EVENT, namespace=namespace,
                                    data=[event] + data, id=id,
                                    binary=binary))
|
||||
|
||||
def send(self, data, namespace=None, callback=None):
    """Send a message to the server.

    This function emits an event with the name ``'message'``. Use
    :func:`emit` to issue custom event names.

    :param data: The data to send to the server. Data can be of
                 type ``str``, ``bytes``, ``list`` or ``dict``. If a
                 ``list`` or ``dict``, the data will be serialized as JSON.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the event is emitted to the
                      default namespace.
    :param callback: If given, this function will be called to acknowledge
                     that the server has received the message. The
                     arguments that will be passed to the function are
                     those provided by the server.
    """
    self.emit('message', data=data, namespace=namespace,
              callback=callback)
|
||||
|
||||
def call(self, event, data=None, namespace=None, timeout=60):
    """Emit a custom event to the server and wait for the acknowledgement.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param data: The data to send. Data can be of type ``str``, ``bytes``,
                 ``list`` or ``dict``. If a ``list`` or ``dict``, the data
                 will be serialized as JSON.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the event is emitted to the
                      default namespace.
    :param timeout: The waiting timeout. If the timeout is reached before
                    the acknowledgement arrives, a ``TimeoutError``
                    exception is raised.
    """
    ack = self.eio.create_event()
    results = []

    def on_ack(*ack_args):
        # record the acknowledgement arguments and release the waiter
        results.append(ack_args)
        ack.set()

    self.emit(event, data=data, namespace=namespace, callback=on_ack)
    if not ack.wait(timeout=timeout):
        raise exceptions.TimeoutError()
    # unwrap the acknowledgement: many args -> tuple, one arg -> the
    # value itself, no args -> None
    reply = results[0]
    if len(reply) > 1:
        return reply
    if len(reply) == 1:
        return reply[0]
    return None
|
||||
|
||||
def disconnect(self):
    """Disconnect from the server."""
    # Only the disconnection request is issued here; the application's
    # disconnect handlers are invoked later from _handle_eio_disconnect.
    for nsp in self.namespaces:
        self._send_packet(packet.Packet(packet.DISCONNECT, namespace=nsp))
    self._send_packet(packet.Packet(
        packet.DISCONNECT, namespace='/'))
    self.connected = False
    self.eio.disconnect(abort=True)
|
||||
|
||||
def transport(self):
    """Return the name of the transport currently used by the client.

    The two possible values returned by this function are ``'polling'``
    and ``'websocket'``.
    """
    # the Engine.IO layer owns the transport; just forward the query
    return self.eio.transport()
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    The return value is an object compatible with the ``Thread`` class
    from the standard library; its ``start()`` method has already been
    invoked by this call.
    """
    return self.eio.start_background_task(target, *args, **kwargs)
|
||||
|
||||
def sleep(self, seconds=0):
    """Sleep for the requested amount of time using the appropriate async
    model.

    This is a utility function that applications can use to put a task to
    sleep without having to worry about using the correct call for the
    selected async mode.
    """
    # delegate to Engine.IO, which knows the configured async model
    return self.eio.sleep(seconds)
|
||||
|
||||
def _send_packet(self, pkt):
|
||||
"""Send a Socket.IO packet to the server."""
|
||||
encoded_packet = pkt.encode()
|
||||
if isinstance(encoded_packet, list):
|
||||
binary = False
|
||||
for ep in encoded_packet:
|
||||
self.eio.send(ep, binary=binary)
|
||||
binary = True
|
||||
else:
|
||||
self.eio.send(encoded_packet, binary=False)
|
||||
|
||||
def _generate_ack_id(self, namespace, callback):
    """Generate a unique identifier for an ACK packet and register the
    callback under it."""
    namespace = namespace or '/'
    # slot 0 of the per-namespace dict holds the id counter; the other
    # keys map ack ids to their callbacks
    ns_callbacks = self.callbacks.setdefault(
        namespace, {0: itertools.count(1)})
    ack_id = six.next(ns_callbacks[0])
    ns_callbacks[ack_id] = callback
    return ack_id
|
||||
|
||||
def _handle_connect(self, namespace):
    """Handle a CONNECT packet received for a namespace."""
    namespace = namespace or '/'
    self.logger.info('Namespace {} is connected'.format(namespace))
    self._trigger_event('connect', namespace=namespace)
    if namespace == '/':
        # the root namespace is up; now request the other namespaces
        for n in self.namespaces:
            self._send_packet(packet.Packet(packet.CONNECT, namespace=n))
    elif namespace not in self.namespaces:
        # record the newly connected custom namespace
        self.namespaces.append(namespace)
|
||||
|
||||
def _handle_disconnect(self, namespace):
    """Handle a DISCONNECT packet received for a namespace."""
    if not self.connected:
        # ignore stray disconnects when there is no active connection
        return
    namespace = namespace or '/'
    if namespace == '/':
        # disconnecting the root namespace disconnects all the others too
        for n in self.namespaces:
            self._trigger_event('disconnect', namespace=n)
        self.namespaces = []
    self._trigger_event('disconnect', namespace=namespace)
    if namespace in self.namespaces:
        self.namespaces.remove(namespace)
    if namespace == '/':
        self.connected = False
|
||||
|
||||
def _handle_event(self, namespace, id, data):
    """Dispatch a received EVENT packet to the application handler.

    ``data[0]`` is the event name, the remaining items are the event
    arguments. A non-None ``id`` means the server expects an ACK with
    the handler's return value.
    """
    namespace = namespace or '/'
    self.logger.info('Received event "%s" [%s]', data[0], namespace)
    r = self._trigger_event(data[0], namespace, *data[1:])
    if id is not None:
        # send ACK packet with the response returned by the handler
        # tuples are expanded as multiple arguments
        if r is None:
            data = []
        elif isinstance(r, tuple):
            data = list(r)
        else:
            data = [r]
        # self.binary is presumably set by the constructor (not visible
        # in this chunk) -- confirm against __init__
        if six.PY2 and not self.binary:
            binary = False  # pragma: nocover
        else:
            binary = None
        self._send_packet(packet.Packet(packet.ACK, namespace=namespace,
                                        id=id, data=data, binary=binary))
|
||||
|
||||
def _handle_ack(self, namespace, id, data):
    """Invoke the callback registered for a received ACK packet."""
    namespace = namespace or '/'
    self.logger.info('Received ack [%s]', namespace)
    callback = None
    try:
        callback = self.callbacks[namespace][id]
    except KeyError:
        # if we get an unknown callback we just ignore it
        self.logger.warning('Unknown callback received, ignoring.')
    else:
        # each ack id is single-use; drop it before invoking the callback
        del self.callbacks[namespace][id]
    if callback is not None:
        callback(*data)
|
||||
|
||||
def _handle_error(self, namespace, data):
    """Handle an ERROR packet: the server rejected a namespace connection."""
    namespace = namespace or '/'
    self.logger.info('Connection to namespace {} was rejected'.format(
        namespace))
    # normalize the error payload to a tuple of positional arguments
    if data is None:
        data = tuple()
    elif not isinstance(data, (tuple, list)):
        data = (data,)
    self._trigger_event('connect_error', namespace, *data)
    if namespace in self.namespaces:
        self.namespaces.remove(namespace)
    if namespace == '/':
        # rejection of the root namespace ends the whole connection
        self.namespaces = []
        self.connected = False
|
||||
|
||||
def _trigger_event(self, event, namespace, *args):
|
||||
"""Invoke an application event handler."""
|
||||
# first see if we have an explicit handler for the event
|
||||
if namespace in self.handlers and event in self.handlers[namespace]:
|
||||
return self.handlers[namespace][event](*args)
|
||||
|
||||
# or else, forward the event to a namespace handler if one exists
|
||||
elif namespace in self.namespace_handlers:
|
||||
return self.namespace_handlers[namespace].trigger_event(
|
||||
event, *args)
|
||||
|
||||
def _handle_reconnect(self):
    """Background task that retries the connection with exponential
    backoff and jitter until it succeeds, is aborted, or the configured
    attempt limit is reached.
    """
    self._reconnect_abort.clear()
    # reconnecting_clients is a module-level registry -- presumably used
    # elsewhere to abort in-flight reconnections; not visible in this chunk
    reconnecting_clients.append(self)
    attempt_count = 0
    current_delay = self.reconnection_delay
    while True:
        # exponential backoff, capped at reconnection_delay_max, with
        # +/- randomization_factor jitter to avoid thundering herds
        delay = current_delay
        current_delay *= 2
        if delay > self.reconnection_delay_max:
            delay = self.reconnection_delay_max
        delay += self.randomization_factor * (2 * random.random() - 1)
        self.logger.info(
            'Connection failed, new attempt in {:.02f} seconds'.format(
                delay))
        # wait() doubles as the backoff sleep and the abort check
        if self._reconnect_abort.wait(delay):
            self.logger.info('Reconnect task aborted')
            break
        attempt_count += 1
        try:
            self.connect(self.connection_url,
                         headers=self.connection_headers,
                         transports=self.connection_transports,
                         namespaces=self.connection_namespaces,
                         socketio_path=self.socketio_path)
        except (exceptions.ConnectionError, ValueError):
            pass
        else:
            self.logger.info('Reconnection successful')
            self._reconnect_task = None
            break
        # reconnection_attempts == 0 means retry forever
        if self.reconnection_attempts and \
                attempt_count >= self.reconnection_attempts:
            self.logger.info(
                'Maximum reconnection attempts reached, giving up')
            break
    reconnecting_clients.remove(self)
|
||||
|
||||
def _handle_eio_connect(self):
|
||||
"""Handle the Engine.IO connection event."""
|
||||
self.logger.info('Engine.IO connection established')
|
||||
self.sid = self.eio.sid
|
||||
|
||||
def _handle_eio_message(self, data):
    """Dispatch Engine.IO messages to the matching Socket.IO handler."""
    if self._binary_packet:
        # a previous BINARY_EVENT/BINARY_ACK packet is still collecting
        # its binary attachments; feed this payload to it
        pkt = self._binary_packet
        if pkt.add_attachment(data):
            # all attachments received; the packet is now complete
            self._binary_packet = None
            if pkt.packet_type == packet.BINARY_EVENT:
                self._handle_event(pkt.namespace, pkt.id, pkt.data)
            else:
                self._handle_ack(pkt.namespace, pkt.id, pkt.data)
    else:
        pkt = packet.Packet(encoded_packet=data)
        if pkt.packet_type == packet.CONNECT:
            self._handle_connect(pkt.namespace)
        elif pkt.packet_type == packet.DISCONNECT:
            self._handle_disconnect(pkt.namespace)
        elif pkt.packet_type == packet.EVENT:
            self._handle_event(pkt.namespace, pkt.id, pkt.data)
        elif pkt.packet_type == packet.ACK:
            self._handle_ack(pkt.namespace, pkt.id, pkt.data)
        elif pkt.packet_type == packet.BINARY_EVENT or \
                pkt.packet_type == packet.BINARY_ACK:
            # hold the packet until its binary attachments arrive
            self._binary_packet = pkt
        elif pkt.packet_type == packet.ERROR:
            self._handle_error(pkt.namespace, pkt.data)
        else:
            raise ValueError('Unknown packet type.')
|
||||
|
||||
def _handle_eio_disconnect(self):
    """Handle the Engine.IO disconnection event."""
    self.logger.info('Engine.IO connection dropped')
    if self.connected:
        # notify the application for every namespace, then reset state
        for n in self.namespaces:
            self._trigger_event('disconnect', namespace=n)
        self._trigger_event('disconnect', namespace='/')
        self.namespaces = []
        self.connected = False
    # pending callbacks and partial binary packets are no longer valid
    self.callbacks = {}
    self._binary_packet = None
    self.sid = None
    if self.eio.state == 'connected' and self.reconnection:
        # the drop was unexpected; start the reconnect task in the
        # background
        self._reconnect_task = self.start_background_task(
            self._handle_reconnect)
|
||||
|
||||
def _engineio_client_class(self):
    """Return the Engine.IO client class used for the transport layer."""
    return engineio.Client
|
@ -0,0 +1,30 @@
|
||||
class SocketIOError(Exception):
    """Base class for all Socket.IO related errors."""
    pass
|
||||
|
||||
|
||||
class ConnectionError(SocketIOError):
    """Raised when a connection to the server cannot be established."""
    pass
|
||||
|
||||
|
||||
class ConnectionRefusedError(ConnectionError):
    """Connection refused exception.

    This exception can be raised from a connect handler when the connection
    is not accepted. The positional arguments provided with the exception
    are returned with the error packet to the client.
    """
    def __init__(self, *args):
        if not args:
            # no details given
            self.error_args = None
        elif len(args) == 1 and not isinstance(args[0], list):
            # a single non-list argument is stored as a scalar value
            self.error_args = args[0]
        else:
            # several arguments (or a single list) are kept as given
            self.error_args = args
|
||||
|
||||
|
||||
class TimeoutError(SocketIOError):
    """Raised when an operation does not complete within the allowed time."""
    pass
|
||||
|
||||
|
||||
class BadNamespaceError(SocketIOError):
    """Raised when an operation targets a namespace that is not connected."""
    pass
|
@ -0,0 +1,63 @@
|
||||
import logging
|
||||
import pickle
|
||||
|
||||
try:
|
||||
import kafka
|
||||
except ImportError:
|
||||
kafka = None
|
||||
|
||||
from .pubsub_manager import PubSubManager
|
||||
|
||||
logger = logging.getLogger('socketio')
|
||||
|
||||
|
||||
class KafkaManager(PubSubManager):  # pragma: no cover
    """Kafka based client manager.

    This class implements a Kafka backend for event sharing across multiple
    processes.

    To use a Kafka backend, initialize the :class:`Server` instance as
    follows::

        url = 'kafka://hostname:port'
        server = socketio.Server(client_manager=socketio.KafkaManager(url))

    :param url: The connection URL for the Kafka server. For a default Kafka
                store running on the same host, use ``kafka://``.
    :param channel: The channel name (topic) on which the server sends and
                    receives notifications. Must be the same in all the
                    servers.
    :param write_only: If set to ``True``, only initialize to emit events. The
                       default of ``False`` initializes the class for emitting
                       and receiving.
    """
    name = 'kafka'

    def __init__(self, url='kafka://localhost:9092', channel='socketio',
                 write_only=False):
        if kafka is None:
            raise RuntimeError('kafka-python package is not installed '
                               '(Run "pip install kafka-python" in your '
                               'virtualenv).')

        super(KafkaManager, self).__init__(channel=channel,
                                           write_only=write_only)

        # strip the "kafka://" scheme prefix; fall back to the default
        # broker address when only the bare scheme is given
        self.kafka_url = url[8:] if url != 'kafka://' else 'localhost:9092'
        self.producer = kafka.KafkaProducer(bootstrap_servers=self.kafka_url)
        self.consumer = kafka.KafkaConsumer(self.channel,
                                            bootstrap_servers=self.kafka_url)

    def _publish(self, data):
        # Serialize and publish a payload on the notification topic.
        # NOTE(review): pickle implies every process sharing this topic is
        # trusted -- do not point this at an untrusted broker.
        self.producer.send(self.channel, value=pickle.dumps(data))
        self.producer.flush()

    def _kafka_listen(self):
        # Yield raw Kafka messages as they arrive from the consumer.
        for message in self.consumer:
            yield message

    def _listen(self):
        # Yield unpickled payloads for messages published on our channel.
        for message in self._kafka_listen():
            if message.topic == self.channel:
                yield pickle.loads(message.value)
|
@ -0,0 +1,122 @@
|
||||
import pickle
|
||||
import uuid
|
||||
|
||||
try:
|
||||
import kombu
|
||||
except ImportError:
|
||||
kombu = None
|
||||
|
||||
from .pubsub_manager import PubSubManager
|
||||
|
||||
|
||||
class KombuManager(PubSubManager):  # pragma: no cover
    """Client manager that uses kombu for inter-process messaging.

    This class implements a client manager backend for event sharing across
    multiple processes, using RabbitMQ, Redis or any other messaging mechanism
    supported by `kombu <http://kombu.readthedocs.org/en/latest/>`_.

    To use a kombu backend, initialize the :class:`Server` instance as
    follows::

        url = 'amqp://user:password@hostname:port//'
        server = socketio.Server(client_manager=socketio.KombuManager(url))

    :param url: The connection URL for the backend messaging queue. Example
                connection URLs are ``'amqp://guest:guest@localhost:5672//'``
                and ``'redis://localhost:6379/'`` for RabbitMQ and Redis
                respectively. Consult the `kombu documentation
                <http://kombu.readthedocs.org/en/latest/userguide\
                /connections.html#urls>`_ for more on how to construct
                connection URLs.
    :param channel: The channel name on which the server sends and receives
                    notifications. Must be the same in all the servers.
    :param write_only: If set to ``True``, only initialize to emit events. The
                       default of ``False`` initializes the class for emitting
                       and receiving.
    :param logger: To enable logging set to ``logging.getLogger`` instance;
                   forwarded to the base manager.
    :param connection_options: additional keyword arguments to be passed to
                               ``kombu.Connection()``.
    :param exchange_options: additional keyword arguments to be passed to
                             ``kombu.Exchange()``.
    :param queue_options: additional keyword arguments to be passed to
                          ``kombu.Queue()``.
    :param producer_options: additional keyword arguments to be passed to
                             ``kombu.Producer()``.
    """
    name = 'kombu'

    def __init__(self, url='amqp://guest:guest@localhost:5672//',
                 channel='socketio', write_only=False, logger=None,
                 connection_options=None, exchange_options=None,
                 queue_options=None, producer_options=None):
        if kombu is None:
            raise RuntimeError('Kombu package is not installed '
                               '(Run "pip install kombu" in your '
                               'virtualenv).')
        super(KombuManager, self).__init__(channel=channel,
                                           write_only=write_only,
                                           logger=logger)
        self.url = url
        self.connection_options = connection_options or {}
        self.exchange_options = exchange_options or {}
        self.queue_options = queue_options or {}
        self.producer_options = producer_options or {}
        self.producer = self._producer()

    def initialize(self):
        # Verify that the socket library is monkey patched when an async
        # framework requires it, since kombu does blocking socket I/O.
        super(KombuManager, self).initialize()

        monkey_patched = True
        if self.server.async_mode == 'eventlet':
            from eventlet.patcher import is_monkey_patched
            monkey_patched = is_monkey_patched('socket')
        elif 'gevent' in self.server.async_mode:
            from gevent.monkey import is_module_patched
            monkey_patched = is_module_patched('socket')
        if not monkey_patched:
            raise RuntimeError(
                'Kombu requires a monkey patched socket library to work '
                'with ' + self.server.async_mode)

    def _connection(self):
        # Build a new kombu connection from the configured URL.
        return kombu.Connection(self.url, **self.connection_options)

    def _exchange(self):
        # Fan-out exchange so every server sees every notification.
        options = {'type': 'fanout', 'durable': False}
        options.update(self.exchange_options)
        return kombu.Exchange(self.channel, **options)

    def _queue(self):
        # Each server gets its own uniquely-named, auto-expiring queue.
        queue_name = 'flask-socketio.' + str(uuid.uuid4())
        options = {'durable': False, 'queue_arguments': {'x-expires': 300000}}
        options.update(self.queue_options)
        return kombu.Queue(queue_name, self._exchange(), **options)

    def _producer(self):
        return self._connection().Producer(exchange=self._exchange(),
                                           **self.producer_options)

    def __error_callback(self, exception, interval):
        # Invoked by kombu's retry machinery between attempts.
        self._get_logger().exception('Sleeping {}s'.format(interval))

    def _publish(self, data):
        # NOTE(review): payloads are pickled; all servers sharing the
        # broker are assumed to be trusted.
        connection = self._connection()
        publish = connection.ensure(self.producer, self.producer.publish,
                                    errback=self.__error_callback)
        publish(pickle.dumps(data))

    def _listen(self):
        # Consume messages forever, re-establishing the connection after
        # any connection-level error.
        reader_queue = self._queue()

        while True:
            connection = self._connection().ensure_connection(
                errback=self.__error_callback)
            try:
                with connection.SimpleQueue(reader_queue) as queue:
                    while True:
                        message = queue.get(block=True)
                        message.ack()
                        yield message.payload
            except connection.connection_errors:
                self._get_logger().exception("Connection error "
                                             "while reading from queue")
|
@ -0,0 +1,42 @@
|
||||
import engineio
|
||||
|
||||
|
||||
class WSGIApp(engineio.WSGIApp):
    """WSGI middleware for Socket.IO.

    This middleware dispatches traffic to a Socket.IO application. It can
    also serve a list of static files to the client, or forward unrelated
    HTTP traffic to another WSGI application.

    :param socketio_app: The Socket.IO server. Must be an instance of the
                         ``socketio.Server`` class.
    :param wsgi_app: The WSGI app that receives all other traffic.
    :param static_files: A dictionary with static file mapping rules. See
                         the documentation for details on this argument.
    :param socketio_path: The endpoint where the Socket.IO application
                          should be installed. The default value is
                          appropriate for most cases.

    Example usage::

        import socketio
        import eventlet

        sio = socketio.Server()
        app = socketio.WSGIApp(sio, wsgi_app)
        eventlet.wsgi.server(eventlet.listen(('', 8000)), app)
    """
    def __init__(self, socketio_app, wsgi_app=None, static_files=None,
                 socketio_path='socket.io'):
        # the Engine.IO middleware does the actual dispatching; only the
        # path keyword argument differs in name
        super(WSGIApp, self).__init__(socketio_app, wsgi_app,
                                      static_files=static_files,
                                      engineio_path=socketio_path)
|
||||
|
||||
|
||||
class Middleware(WSGIApp):
    """Deprecated alias kept for backward compatibility; use
    :class:`WSGIApp` instead."""
    def __init__(self, socketio_app, wsgi_app=None,
                 socketio_path='socket.io'):
        super(Middleware, self).__init__(socketio_app, wsgi_app,
                                         socketio_path=socketio_path)
|
@ -0,0 +1,191 @@
|
||||
class BaseNamespace(object):
    """Common base for class-based namespaces: routes events to ``on_*``
    methods."""

    def __init__(self, namespace=None):
        # default to the root namespace when none is given
        self.namespace = namespace or '/'

    def is_asyncio_based(self):
        """Return ``True`` if this namespace requires an asyncio
        server/client; this one does not."""
        return False

    def trigger_event(self, event, *args):
        """Dispatch an event to the proper handler method.

        In the most common usage, this method is not overloaded by
        subclasses, as it performs the routing of events to methods.
        However, this method can be overridden if special dispatching
        rules are needed, or if having a single method that catches all
        events is desired. Returns None for events with no matching
        ``on_<event>`` method.
        """
        method_name = 'on_' + event
        if hasattr(self, method_name):
            return getattr(self, method_name)(*args)
|
||||
|
||||
|
||||
class Namespace(BaseNamespace):
|
||||
"""Base class for server-side class-based namespaces.
|
||||
|
||||
A class-based namespace is a class that contains all the event handlers
|
||||
for a Socket.IO namespace. The event handlers are methods of the class
|
||||
with the prefix ``on_``, such as ``on_connect``, ``on_disconnect``,
|
||||
``on_message``, ``on_json``, and so on.
|
||||
|
||||
:param namespace: The Socket.IO namespace to be used with all the event
|
||||
handlers defined in this class. If this argument is
|
||||
omitted, the default namespace is used.
|
||||
"""
|
||||
def __init__(self, namespace=None):
|
||||
super(Namespace, self).__init__(namespace=namespace)
|
||||
self.server = None
|
||||
|
||||
def _set_server(self, server):
|
||||
self.server = server
|
||||
|
||||
def emit(self, event, data=None, room=None, skip_sid=None, namespace=None,
|
||||
callback=None):
|
||||
"""Emit a custom event to one or more connected clients.
|
||||
|
||||
The only difference with the :func:`socketio.Server.emit` method is
|
||||
that when the ``namespace`` argument is not given the namespace
|
||||
associated with the class is used.
|
||||
"""
|
||||
return self.server.emit(event, data=data, room=room, skip_sid=skip_sid,
|
||||
namespace=namespace or self.namespace,
|
||||
callback=callback)
|
||||
|
||||
def send(self, data, room=None, skip_sid=None, namespace=None,
|
||||
callback=None):
|
||||
"""Send a message to one or more connected clients.
|
||||
|
||||
The only difference with the :func:`socketio.Server.send` method is
|
||||
that when the ``namespace`` argument is not given the namespace
|
||||
associated with the class is used.
|
||||
"""
|
||||
return self.server.send(data, room=room, skip_sid=skip_sid,
|
||||
namespace=namespace or self.namespace,
|
||||
callback=callback)
|
||||
|
||||
def enter_room(self, sid, room, namespace=None):
|
||||
"""Enter a room.
|
||||
|
||||
The only difference with the :func:`socketio.Server.enter_room` method
|
||||
is that when the ``namespace`` argument is not given the namespace
|
||||
associated with the class is used.
|
||||
"""
|
||||
return self.server.enter_room(sid, room,
|
||||
namespace=namespace or self.namespace)
|
||||
|
||||
def leave_room(self, sid, room, namespace=None):
|
||||
"""Leave a room.
|
||||
|
||||
The only difference with the :func:`socketio.Server.leave_room` method
|
||||
is that when the ``namespace`` argument is not given the namespace
|
||||
associated with the class is used.
|
||||
"""
|
||||
return self.server.leave_room(sid, room,
|
||||
namespace=namespace or self.namespace)
|
||||
|
||||
def close_room(self, room, namespace=None):
|
||||
"""Close a room.
|
||||
|
||||
The only difference with the :func:`socketio.Server.close_room` method
|
||||
is that when the ``namespace`` argument is not given the namespace
|
||||
associated with the class is used.
|
||||
"""
|
||||
return self.server.close_room(room,
|
||||
namespace=namespace or self.namespace)
|
||||
|
||||
def rooms(self, sid, namespace=None):
    """Return the rooms a client is in.

    Works exactly like :func:`socketio.Server.rooms`, except that when
    ``namespace`` is omitted the namespace associated with this class
    is used instead.
    """
    target = namespace if namespace else self.namespace
    return self.server.rooms(sid, namespace=target)
|
||||
|
||||
def get_session(self, sid, namespace=None):
    """Return the user session for a client.

    Works exactly like :func:`socketio.Server.get_session`, except that
    when ``namespace`` is omitted the namespace associated with this
    class is used instead.
    """
    target = namespace or self.namespace
    return self.server.get_session(sid, namespace=target)
|
||||
|
||||
def save_session(self, sid, session, namespace=None):
    """Store the user session for a client.

    Works exactly like :func:`socketio.Server.save_session`, except that
    when ``namespace`` is omitted the namespace associated with this
    class is used instead.
    """
    target = namespace or self.namespace
    return self.server.save_session(sid, session, namespace=target)
|
||||
|
||||
def session(self, sid, namespace=None):
    """Return the user session for a client with context manager syntax.

    Works exactly like :func:`socketio.Server.session`, except that when
    ``namespace`` is omitted the namespace associated with this class
    is used instead.
    """
    target = namespace or self.namespace
    return self.server.session(sid, namespace=target)
|
||||
|
||||
def disconnect(self, sid, namespace=None):
    """Disconnect a client.

    Works exactly like :func:`socketio.Server.disconnect`, except that
    when ``namespace`` is omitted the namespace associated with this
    class is used instead.
    """
    return self.server.disconnect(
        sid, namespace=namespace or self.namespace)
|
||||
|
||||
|
||||
class ClientNamespace(BaseNamespace):
    """Base class for client-side class-based namespaces.

    A class-based namespace groups all the event handlers for one
    Socket.IO namespace in a single class. Handlers are methods named
    with the ``on_`` prefix, such as ``on_connect``, ``on_disconnect``,
    ``on_message``, ``on_json``, and so on.

    :param namespace: The Socket.IO namespace to be used with all the event
                      handlers defined in this class. If this argument is
                      omitted, the default namespace is used.
    """
    def __init__(self, namespace=None):
        super(ClientNamespace, self).__init__(namespace=namespace)
        # populated later through _set_client(), once the client is known
        self.client = None

    def _set_client(self, client):
        # invoked when this namespace handler is registered with a client
        self.client = client

    def emit(self, event, data=None, namespace=None, callback=None):
        """Emit a custom event to the server.

        Works exactly like :func:`socketio.Client.emit`, except that when
        ``namespace`` is omitted the namespace associated with this class
        is used instead.
        """
        target = namespace or self.namespace
        return self.client.emit(event, data=data, namespace=target,
                                callback=callback)

    def send(self, data, room=None, skip_sid=None, namespace=None,
             callback=None):
        """Send a message to the server.

        Works exactly like :func:`socketio.Client.send`, except that when
        ``namespace`` is omitted the namespace associated with this class
        is used instead. The ``room`` and ``skip_sid`` arguments are
        accepted for signature symmetry with the server-side namespace,
        but are not forwarded to the client.
        """
        target = namespace or self.namespace
        return self.client.send(data, namespace=target, callback=callback)

    def disconnect(self):
        """Disconnect from the server.

        Works exactly like :func:`socketio.Client.disconnect`.
        """
        return self.client.disconnect()
|
@ -0,0 +1,179 @@
|
||||
import functools
|
||||
import json as _json
|
||||
|
||||
import six
|
||||
|
||||
# Numeric Socket.IO packet type codes, in wire-format order.
(CONNECT, DISCONNECT, EVENT, ACK, ERROR, BINARY_EVENT, BINARY_ACK) = \
    (0, 1, 2, 3, 4, 5, 6)
# Human-readable packet names, indexed by the numeric codes above.
packet_names = ['CONNECT', 'DISCONNECT', 'EVENT', 'ACK', 'ERROR',
                'BINARY_EVENT', 'BINARY_ACK']
|
||||
|
||||
|
||||
class Packet(object):
    """Socket.IO packet.

    Handles encoding a packet to its text wire format (plus a list of
    binary attachments, when the payload contains binary data) and
    decoding a received packet back into its components.
    """

    # the format of the Socket.IO packet is as follows:
    #
    # packet type: 1 byte, values 0-6
    # num_attachments: ASCII encoded, only if num_attachments != 0
    # '-': only if num_attachments != 0
    # namespace: only if namespace != '/'
    # ',': only if namespace and one of id and data are defined in this packet
    # id: ASCII encoded, only if id is not None
    # data: JSON dump of data payload

    # JSON module used for (de)serialization; replaceable as a class
    # attribute with any module exposing compatible dumps()/loads()
    json = _json

    def __init__(self, packet_type=EVENT, data=None, namespace=None, id=None,
                 binary=None, encoded_packet=None):
        """Create a packet.

        :param packet_type: One of the numeric packet type constants.
        :param data: The packet payload.
        :param namespace: The Socket.IO namespace, or ``None`` for the
                          default namespace.
        :param id: The packet id, used to pair ACK packets with the events
                   they acknowledge.
        :param binary: ``True`` to force a binary packet, ``None`` to
                       auto-detect from ``data``.
        :param encoded_packet: When given, the packet is initialized by
                               decoding this encoded payload.
        """
        self.packet_type = packet_type
        self.data = data
        self.namespace = namespace
        self.id = id
        # promote EVENT/ACK to their binary variants when the payload
        # contains binary elements (or when explicitly requested)
        if binary or (binary is None and self._data_is_binary(self.data)):
            if self.packet_type == EVENT:
                self.packet_type = BINARY_EVENT
            elif self.packet_type == ACK:
                self.packet_type = BINARY_ACK
            else:
                raise ValueError('Packet does not support binary payload.')
        self.attachment_count = 0
        self.attachments = []
        if encoded_packet:
            # decode() reports how many binary attachment packets are
            # still needed to complete this packet
            self.attachment_count = self.decode(encoded_packet)

    def encode(self):
        """Encode the packet for transmission.

        If the packet contains binary elements, this function returns a list
        of packets where the first is the original packet with placeholders for
        the binary components and the remaining ones the binary attachments.
        """
        encoded_packet = six.text_type(self.packet_type)
        if self.packet_type == BINARY_EVENT or self.packet_type == BINARY_ACK:
            # replace binary elements with placeholders and collect them
            # as separate attachments
            data, attachments = self._deconstruct_binary(self.data)
            encoded_packet += six.text_type(len(attachments)) + '-'
        else:
            data = self.data
            attachments = None
        needs_comma = False
        if self.namespace is not None and self.namespace != '/':
            encoded_packet += self.namespace
            needs_comma = True
        if self.id is not None:
            if needs_comma:
                encoded_packet += ','
                needs_comma = False
            encoded_packet += six.text_type(self.id)
        if data is not None:
            if needs_comma:
                encoded_packet += ','
            # compact separators keep the payload as small as possible
            encoded_packet += self.json.dumps(data, separators=(',', ':'))
        if attachments is not None:
            encoded_packet = [encoded_packet] + attachments
        return encoded_packet

    def decode(self, encoded_packet):
        """Decode a transmitted package.

        The return value indicates how many binary attachment packets are
        necessary to fully decode the packet.
        """
        ep = encoded_packet
        try:
            self.packet_type = int(ep[0:1])
        except TypeError:
            # slicing raised, so ep is not a string (e.g. already an int);
            # treat the whole value as the packet type
            self.packet_type = ep
            ep = ''
        self.namespace = None
        self.data = None
        ep = ep[1:]
        # a leading "<count>-" announces binary attachments
        dash = ep.find('-')
        attachment_count = 0
        if dash > 0 and ep[0:dash].isdigit():
            attachment_count = int(ep[0:dash])
            ep = ep[dash + 1:]
        # an optional namespace starts with '/' and ends at a comma (or at
        # the end of the packet)
        if ep and ep[0:1] == '/':
            sep = ep.find(',')
            if sep == -1:
                self.namespace = ep
                ep = ''
            else:
                self.namespace = ep[0:sep]
                ep = ep[sep + 1:]
            # strip any query string appended to the namespace
            q = self.namespace.find('?')
            if q != -1:
                self.namespace = self.namespace[0:q]
        # an optional numeric packet id precedes the JSON payload
        if ep and ep[0].isdigit():
            self.id = 0
            while ep and ep[0].isdigit():
                self.id = self.id * 10 + int(ep[0])
                ep = ep[1:]
        # anything left is the JSON-encoded payload
        if ep:
            self.data = self.json.loads(ep)
        return attachment_count

    def add_attachment(self, attachment):
        # Collect one binary attachment; returns True once all expected
        # attachments have arrived and the payload has been reconstructed.
        if self.attachment_count <= len(self.attachments):
            raise ValueError('Unexpected binary attachment')
        self.attachments.append(attachment)
        if self.attachment_count == len(self.attachments):
            self.reconstruct_binary(self.attachments)
            return True
        return False

    def reconstruct_binary(self, attachments):
        """Reconstruct a decoded packet using the given list of binary
        attachments.
        """
        self.data = self._reconstruct_binary_internal(self.data,
                                                      self.attachments)

    def _reconstruct_binary_internal(self, data, attachments):
        # Recursively replace {'_placeholder': True, 'num': i} markers with
        # the i-th binary attachment.
        if isinstance(data, list):
            return [self._reconstruct_binary_internal(item, attachments)
                    for item in data]
        elif isinstance(data, dict):
            if data.get('_placeholder') and 'num' in data:
                return attachments[data['num']]
            else:
                return {key: self._reconstruct_binary_internal(value,
                                                               attachments)
                        for key, value in six.iteritems(data)}
        else:
            return data

    def _deconstruct_binary(self, data):
        """Extract binary components in the packet."""
        attachments = []
        data = self._deconstruct_binary_internal(data, attachments)
        return data, attachments

    def _deconstruct_binary_internal(self, data, attachments):
        # Recursively pull binary values out of the payload, appending them
        # to `attachments` and leaving numbered placeholder dicts behind.
        if isinstance(data, six.binary_type):
            attachments.append(data)
            return {'_placeholder': True, 'num': len(attachments) - 1}
        elif isinstance(data, list):
            return [self._deconstruct_binary_internal(item, attachments)
                    for item in data]
        elif isinstance(data, dict):
            return {key: self._deconstruct_binary_internal(value, attachments)
                    for key, value in six.iteritems(data)}
        else:
            return data

    def _data_is_binary(self, data):
        """Check if the data contains binary components."""
        if isinstance(data, six.binary_type):
            return True
        elif isinstance(data, list):
            # True if any list element is (or contains) binary data
            return functools.reduce(
                lambda a, b: a or b, [self._data_is_binary(item)
                                      for item in data], False)
        elif isinstance(data, dict):
            # True if any dict value is (or contains) binary data
            return functools.reduce(
                lambda a, b: a or b, [self._data_is_binary(item)
                                      for item in six.itervalues(data)],
                False)
        else:
            return False
|
@ -0,0 +1,154 @@
|
||||
from functools import partial
|
||||
import uuid
|
||||
|
||||
import json
|
||||
import pickle
|
||||
import six
|
||||
|
||||
from .base_manager import BaseManager
|
||||
|
||||
|
||||
class PubSubManager(BaseManager):
    """Manage a client list attached to a pub/sub backend.

    This is a base class that enables multiple servers to share the list of
    clients, with the servers communicating events through a pub/sub backend.
    The use of a pub/sub backend also allows any client connected to the
    backend to emit events addressed to Socket.IO clients.

    The actual backends must be implemented by subclasses, this class only
    provides a pub/sub generic framework.

    :param channel: The channel name on which the server sends and receives
                    notifications.
    :param write_only: If set to ``True``, only initialize to emit events.
    :param logger: Logger object to use, or ``None`` for the default.
    """
    name = 'pubsub'

    def __init__(self, channel='socketio', write_only=False, logger=None):
        super(PubSubManager, self).__init__()
        self.channel = channel
        self.write_only = write_only
        # unique identifier for this process, used to route callback
        # responses back to the host that emitted the original event
        self.host_id = uuid.uuid4().hex
        self.logger = logger

    def initialize(self):
        """Start the background listening task, unless in write-only mode."""
        super(PubSubManager, self).initialize()
        if not self.write_only:
            self.thread = self.server.start_background_task(self._thread)
        self._get_logger().info(self.name + ' backend initialized.')

    def emit(self, event, data, namespace=None, room=None, skip_sid=None,
             callback=None, **kwargs):
        """Emit a message to a single client, a room, or all the clients
        connected to the namespace.

        This method takes care of propagating the message to all the servers
        that are connected through the message queue.

        The parameters are the same as in :meth:`.Server.emit`.

        :raises RuntimeError: if a callback is given outside the context of
                              a server.
        :raises ValueError: if a callback is given without a room.
        """
        if kwargs.get('ignore_queue'):
            # bypass the queue and deliver only to local clients
            return super(PubSubManager, self).emit(
                event, data, namespace=namespace, room=room, skip_sid=skip_sid,
                callback=callback)
        namespace = namespace or '/'
        if callback is not None:
            if self.server is None:
                raise RuntimeError('Callbacks can only be issued from the '
                                   'context of a server.')
            if room is None:
                raise ValueError('Cannot use callback without a room set.')
            # register the callback locally and publish a (room, namespace,
            # id) triple so the receiving host can route the ack back here
            id = self._generate_ack_id(room, namespace, callback)
            callback = (room, namespace, id)
        else:
            callback = None
        self._publish({'method': 'emit', 'event': event, 'data': data,
                       'namespace': namespace, 'room': room,
                       'skip_sid': skip_sid, 'callback': callback,
                       'host_id': self.host_id})

    def close_room(self, room, namespace=None):
        """Propagate a close-room request through the message queue."""
        self._publish({'method': 'close_room', 'room': room,
                       'namespace': namespace or '/'})

    def _publish(self, data):
        """Publish a message on the Socket.IO channel.

        This method needs to be implemented by the different subclasses that
        support pub/sub backends.
        """
        raise NotImplementedError('This method must be implemented in a '
                                  'subclass.')  # pragma: no cover

    def _listen(self):
        """Return the next message published on the Socket.IO channel,
        blocking until a message is available.

        This method needs to be implemented by the different subclasses that
        support pub/sub backends.
        """
        raise NotImplementedError('This method must be implemented in a '
                                  'subclass.')  # pragma: no cover

    def _handle_emit(self, message):
        # Events with callbacks are very tricky to handle across hosts
        # Here in the receiving end we set up a local callback that preserves
        # the callback host and id from the sender
        remote_callback = message.get('callback')
        remote_host_id = message.get('host_id')
        if remote_callback is not None and len(remote_callback) == 3:
            callback = partial(self._return_callback, remote_host_id,
                               *remote_callback)
        else:
            callback = None
        super(PubSubManager, self).emit(message['event'], message['data'],
                                        namespace=message.get('namespace'),
                                        room=message.get('room'),
                                        skip_sid=message.get('skip_sid'),
                                        callback=callback)

    def _handle_callback(self, message):
        # only the host that issued the original event runs the callback
        if self.host_id == message.get('host_id'):
            try:
                sid = message['sid']
                namespace = message['namespace']
                id = message['id']
                args = message['args']
            except KeyError:
                # malformed callback message; ignore it
                return
            self.trigger_callback(sid, namespace, id, args)

    def _return_callback(self, host_id, sid, namespace, callback_id, *args):
        # When an event callback is received, the callback is returned back
        # the sender, which is identified by the host_id
        self._publish({'method': 'callback', 'host_id': host_id,
                       'sid': sid, 'namespace': namespace, 'id': callback_id,
                       'args': args})

    def _handle_close_room(self, message):
        super(PubSubManager, self).close_room(
            room=message.get('room'), namespace=message.get('namespace'))

    def _thread(self):
        """Background task: decode and dispatch queue messages forever."""
        for message in self._listen():
            data = None
            if isinstance(message, dict):
                data = message
            else:
                # messages may arrive pickled or JSON-encoded; try both.
                # Catch Exception rather than using a bare except so that
                # SystemExit and KeyboardInterrupt are not swallowed here.
                if isinstance(message, six.binary_type):  # pragma: no cover
                    try:
                        data = pickle.loads(message)
                    except Exception:
                        pass
                if data is None:
                    try:
                        data = json.loads(message)
                    except Exception:
                        pass
            if data and 'method' in data:
                if data['method'] == 'emit':
                    self._handle_emit(data)
                elif data['method'] == 'callback':
                    self._handle_callback(data)
                elif data['method'] == 'close_room':
                    self._handle_close_room(data)
|
@ -0,0 +1,115 @@
|
||||
import logging
|
||||
import pickle
|
||||
import time
|
||||
|
||||
try:
|
||||
import redis
|
||||
except ImportError:
|
||||
redis = None
|
||||
|
||||
from .pubsub_manager import PubSubManager
|
||||
|
||||
logger = logging.getLogger('socketio')
|
||||
|
||||
|
||||
class RedisManager(PubSubManager):  # pragma: no cover
    """Redis based client manager.

    This class implements a Redis backend for event sharing across multiple
    processes. Only kept here as one more example of how to build a custom
    backend, since the kombu backend is perfectly adequate to support a Redis
    message queue.

    To use a Redis backend, initialize the :class:`Server` instance as
    follows::

        url = 'redis://hostname:port/0'
        server = socketio.Server(client_manager=socketio.RedisManager(url))

    :param url: The connection URL for the Redis server. For a default Redis
                store running on the same host, use ``redis://``.
    :param channel: The channel name on which the server sends and receives
                    notifications. Must be the same in all the servers.
    :param write_only: If set to ``True``, only initialize to emit events. The
                       default of ``False`` initializes the class for emitting
                       and receiving.
    :param redis_options: additional keyword arguments to be passed to
                          ``Redis.from_url()``.
    """
    name = 'redis'

    def __init__(self, url='redis://localhost:6379/0', channel='socketio',
                 write_only=False, logger=None, redis_options=None):
        # `redis` is None when the import at the top of the module failed
        if redis is None:
            raise RuntimeError('Redis package is not installed '
                               '(Run "pip install redis" in your '
                               'virtualenv).')
        self.redis_url = url
        self.redis_options = redis_options or {}
        self._redis_connect()
        super(RedisManager, self).__init__(channel=channel,
                                           write_only=write_only,
                                           logger=logger)

    def initialize(self):
        super(RedisManager, self).initialize()

        # the eventlet and gevent async modes only work with redis if the
        # socket module has been monkey patched
        monkey_patched = True
        if self.server.async_mode == 'eventlet':
            from eventlet.patcher import is_monkey_patched
            monkey_patched = is_monkey_patched('socket')
        elif 'gevent' in self.server.async_mode:
            from gevent.monkey import is_module_patched
            monkey_patched = is_module_patched('socket')
        if not monkey_patched:
            raise RuntimeError(
                'Redis requires a monkey patched socket library to work '
                'with ' + self.server.async_mode)

    def _redis_connect(self):
        # (re)create the Redis connection and its pub/sub object
        self.redis = redis.Redis.from_url(self.redis_url,
                                          **self.redis_options)
        self.pubsub = self.redis.pubsub()

    def _publish(self, data):
        """Publish a pickled message, with one reconnect-and-retry attempt.

        On the first connection error the loop runs again with ``retry``
        set to ``False``, which triggers a reconnection before the second
        (and final) attempt.
        """
        retry = True
        while True:
            try:
                if not retry:
                    self._redis_connect()
                return self.redis.publish(self.channel, pickle.dumps(data))
            except redis.exceptions.ConnectionError:
                if retry:
                    logger.error('Cannot publish to redis... retrying')
                    retry = False
                else:
                    logger.error('Cannot publish to redis... giving up')
                    break

    def _redis_listen_with_retries(self):
        """Yield pub/sub messages forever, reconnecting with exponential
        backoff (capped at 60 seconds) when the connection is lost.
        """
        retry_sleep = 1
        connect = False
        while True:
            try:
                if connect:
                    # re-subscribe after a reconnection; the initial
                    # subscription is done in _listen()
                    self._redis_connect()
                    self.pubsub.subscribe(self.channel)
                for message in self.pubsub.listen():
                    yield message
            except redis.exceptions.ConnectionError:
                logger.error('Cannot receive from redis... '
                             'retrying in {} secs'.format(retry_sleep))
                connect = True
                time.sleep(retry_sleep)
                retry_sleep *= 2
                if retry_sleep > 60:
                    retry_sleep = 60

    def _listen(self):
        # redis delivers channel names as bytes; encode ours once for the
        # comparison below
        channel = self.channel.encode('utf-8')
        self.pubsub.subscribe(self.channel)
        for message in self._redis_listen_with_retries():
            # only forward real payloads for our channel (skip the
            # subscribe/unsubscribe confirmation messages)
            if message['channel'] == channel and \
                    message['type'] == 'message' and 'data' in message:
                yield message['data']
        self.pubsub.unsubscribe(self.channel)
|
@ -0,0 +1,730 @@
|
||||
import logging
|
||||
|
||||
import engineio
|
||||
import six
|
||||
|
||||
from . import base_manager
|
||||
from . import exceptions
|
||||
from . import namespace
|
||||
from . import packet
|
||||
|
||||
default_logger = logging.getLogger('socketio.server')
|
||||
|
||||
|
||||
class Server(object):
|
||||
"""A Socket.IO server.
|
||||
|
||||
This class implements a fully compliant Socket.IO web server with support
|
||||
for websocket and long-polling transports.
|
||||
|
||||
:param client_manager: The client manager instance that will manage the
|
||||
client list. When this is omitted, the client list
|
||||
is stored in an in-memory structure, so the use of
|
||||
multiple connected servers is not possible.
|
||||
:param logger: To enable logging set to ``True`` or pass a logger object to
|
||||
use. To disable logging set to ``False``. The default is
|
||||
``False``.
|
||||
:param binary: ``True`` to support binary payloads, ``False`` to treat all
|
||||
payloads as text. On Python 2, if this is set to ``True``,
|
||||
``unicode`` values are treated as text, and ``str`` and
|
||||
``bytes`` values are treated as binary. This option has no
|
||||
effect on Python 3, where text and binary payloads are
|
||||
always automatically discovered.
|
||||
:param json: An alternative json module to use for encoding and decoding
|
||||
packets. Custom json modules must have ``dumps`` and ``loads``
|
||||
functions that are compatible with the standard library
|
||||
versions.
|
||||
:param async_handlers: If set to ``True``, event handlers for a client are
|
||||
executed in separate threads. To run handlers for a
|
||||
client synchronously, set to ``False``. The default
|
||||
is ``True``.
|
||||
:param always_connect: When set to ``False``, new connections are
|
||||
provisory until the connect handler returns
|
||||
something other than ``False``, at which point they
|
||||
are accepted. When set to ``True``, connections are
|
||||
immediately accepted, and then if the connect
|
||||
handler returns ``False`` a disconnect is issued.
|
||||
Set to ``True`` if you need to emit events from the
|
||||
connect handler and your client is confused when it
|
||||
receives events before the connection acceptance.
|
||||
In any other case use the default of ``False``.
|
||||
:param kwargs: Connection parameters for the underlying Engine.IO server.
|
||||
|
||||
The Engine.IO configuration supports the following settings:
|
||||
|
||||
:param async_mode: The asynchronous model to use. See the Deployment
|
||||
section in the documentation for a description of the
|
||||
available options. Valid async modes are "threading",
|
||||
"eventlet", "gevent" and "gevent_uwsgi". If this
|
||||
argument is not given, "eventlet" is tried first, then
|
||||
"gevent_uwsgi", then "gevent", and finally "threading".
|
||||
The first async mode that has all its dependencies
|
||||
installed is then one that is chosen.
|
||||
:param ping_timeout: The time in seconds that the client waits for the
|
||||
server to respond before disconnecting. The default
|
||||
is 60 seconds.
|
||||
:param ping_interval: The interval in seconds at which the client pings
|
||||
the server. The default is 25 seconds.
|
||||
:param max_http_buffer_size: The maximum size of a message when using the
|
||||
polling transport. The default is 100,000,000
|
||||
bytes.
|
||||
:param allow_upgrades: Whether to allow transport upgrades or not. The
|
||||
default is ``True``.
|
||||
:param http_compression: Whether to compress packages when using the
|
||||
polling transport. The default is ``True``.
|
||||
:param compression_threshold: Only compress messages when their byte size
|
||||
is greater than this value. The default is
|
||||
1024 bytes.
|
||||
:param cookie: Name of the HTTP cookie that contains the client session
|
||||
id. If set to ``None``, a cookie is not sent to the client.
|
||||
The default is ``'io'``.
|
||||
:param cors_allowed_origins: Origin or list of origins that are allowed to
|
||||
connect to this server. Only the same origin
|
||||
is allowed by default. Set this argument to
|
||||
``'*'`` to allow all origins, or to ``[]`` to
|
||||
disable CORS handling.
|
||||
:param cors_credentials: Whether credentials (cookies, authentication) are
|
||||
allowed in requests to this server. The default is
|
||||
``True``.
|
||||
:param monitor_clients: If set to ``True``, a background task will ensure
|
||||
inactive clients are closed. Set to ``False`` to
|
||||
disable the monitoring task (not recommended). The
|
||||
default is ``True``.
|
||||
:param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
|
||||
a logger object to use. To disable logging set to
|
||||
``False``. The default is ``False``.
|
||||
"""
|
||||
def __init__(self, client_manager=None, logger=False, binary=False,
|
||||
json=None, async_handlers=True, always_connect=False,
|
||||
**kwargs):
|
||||
engineio_options = kwargs
|
||||
engineio_logger = engineio_options.pop('engineio_logger', None)
|
||||
if engineio_logger is not None:
|
||||
engineio_options['logger'] = engineio_logger
|
||||
if json is not None:
|
||||
packet.Packet.json = json
|
||||
engineio_options['json'] = json
|
||||
engineio_options['async_handlers'] = False
|
||||
self.eio = self._engineio_server_class()(**engineio_options)
|
||||
self.eio.on('connect', self._handle_eio_connect)
|
||||
self.eio.on('message', self._handle_eio_message)
|
||||
self.eio.on('disconnect', self._handle_eio_disconnect)
|
||||
self.binary = binary
|
||||
|
||||
self.environ = {}
|
||||
self.handlers = {}
|
||||
self.namespace_handlers = {}
|
||||
|
||||
self._binary_packet = {}
|
||||
|
||||
if not isinstance(logger, bool):
|
||||
self.logger = logger
|
||||
else:
|
||||
self.logger = default_logger
|
||||
if not logging.root.handlers and \
|
||||
self.logger.level == logging.NOTSET:
|
||||
if logger:
|
||||
self.logger.setLevel(logging.INFO)
|
||||
else:
|
||||
self.logger.setLevel(logging.ERROR)
|
||||
self.logger.addHandler(logging.StreamHandler())
|
||||
|
||||
if client_manager is None:
|
||||
client_manager = base_manager.BaseManager()
|
||||
self.manager = client_manager
|
||||
self.manager.set_server(self)
|
||||
self.manager_initialized = False
|
||||
|
||||
self.async_handlers = async_handlers
|
||||
self.always_connect = always_connect
|
||||
|
||||
self.async_mode = self.eio.async_mode
|
||||
|
||||
def is_asyncio_based(self):
|
||||
return False
|
||||
|
||||
def on(self, event, handler=None, namespace=None):
|
||||
"""Register an event handler.
|
||||
|
||||
:param event: The event name. It can be any string. The event names
|
||||
``'connect'``, ``'message'`` and ``'disconnect'`` are
|
||||
reserved and should not be used.
|
||||
:param handler: The function that should be invoked to handle the
|
||||
event. When this parameter is not given, the method
|
||||
acts as a decorator for the handler function.
|
||||
:param namespace: The Socket.IO namespace for the event. If this
|
||||
argument is omitted the handler is associated with
|
||||
the default namespace.
|
||||
|
||||
Example usage::
|
||||
|
||||
# as a decorator:
|
||||
@socket_io.on('connect', namespace='/chat')
|
||||
def connect_handler(sid, environ):
|
||||
print('Connection request')
|
||||
if environ['REMOTE_ADDR'] in blacklisted:
|
||||
return False # reject
|
||||
|
||||
# as a method:
|
||||
def message_handler(sid, msg):
|
||||
print('Received message: ', msg)
|
||||
eio.send(sid, 'response')
|
||||
socket_io.on('message', namespace='/chat', message_handler)
|
||||
|
||||
The handler function receives the ``sid`` (session ID) for the
|
||||
client as first argument. The ``'connect'`` event handler receives the
|
||||
WSGI environment as a second argument, and can return ``False`` to
|
||||
reject the connection. The ``'message'`` handler and handlers for
|
||||
custom event names receive the message payload as a second argument.
|
||||
Any values returned from a message handler will be passed to the
|
||||
client's acknowledgement callback function if it exists. The
|
||||
``'disconnect'`` handler does not take a second argument.
|
||||
"""
|
||||
namespace = namespace or '/'
|
||||
|
||||
def set_handler(handler):
|
||||
if namespace not in self.handlers:
|
||||
self.handlers[namespace] = {}
|
||||
self.handlers[namespace][event] = handler
|
||||
return handler
|
||||
|
||||
if handler is None:
|
||||
return set_handler
|
||||
set_handler(handler)
|
||||
|
||||
def event(self, *args, **kwargs):
|
||||
"""Decorator to register an event handler.
|
||||
|
||||
This is a simplified version of the ``on()`` method that takes the
|
||||
event name from the decorated function.
|
||||
|
||||
Example usage::
|
||||
|
||||
@sio.event
|
||||
def my_event(data):
|
||||
print('Received data: ', data)
|
||||
|
||||
The above example is equivalent to::
|
||||
|
||||
@sio.on('my_event')
|
||||
def my_event(data):
|
||||
print('Received data: ', data)
|
||||
|
||||
A custom namespace can be given as an argument to the decorator::
|
||||
|
||||
@sio.event(namespace='/test')
|
||||
def my_event(data):
|
||||
print('Received data: ', data)
|
||||
"""
|
||||
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
|
||||
# the decorator was invoked without arguments
|
||||
# args[0] is the decorated function
|
||||
return self.on(args[0].__name__)(args[0])
|
||||
else:
|
||||
# the decorator was invoked with arguments
|
||||
def set_handler(handler):
|
||||
return self.on(handler.__name__, *args, **kwargs)(handler)
|
||||
|
||||
return set_handler
|
||||
|
||||
def register_namespace(self, namespace_handler):
|
||||
"""Register a namespace handler object.
|
||||
|
||||
:param namespace_handler: An instance of a :class:`Namespace`
|
||||
subclass that handles all the event traffic
|
||||
for a namespace.
|
||||
"""
|
||||
if not isinstance(namespace_handler, namespace.Namespace):
|
||||
raise ValueError('Not a namespace instance')
|
||||
if self.is_asyncio_based() != namespace_handler.is_asyncio_based():
|
||||
raise ValueError('Not a valid namespace class for this server')
|
||||
namespace_handler._set_server(self)
|
||||
self.namespace_handlers[namespace_handler.namespace] = \
|
||||
namespace_handler
|
||||
|
||||
def emit(self, event, data=None, to=None, room=None, skip_sid=None,
         namespace=None, callback=None, **kwargs):
    """Emit a custom event to one or more connected clients.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param data: The data to send to the client or clients. Data can be of
                 type ``str``, ``bytes``, ``list`` or ``dict``. If a
                 ``list`` or ``dict``, the data will be serialized as JSON.
    :param to: The recipient of the message. This can be set to the
               session ID of a client to address only that client, or to
               any custom room created by the application to address all
               the clients in that room. If this argument is omitted the
               event is broadcasted to all connected clients.
    :param room: Alias for the ``to`` parameter.
    :param skip_sid: The session ID of a client to skip when broadcasting
                     to a room or to all clients. This can be used to
                     prevent a message from being sent to the sender. To
                     skip multiple sids, pass a list.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the event is emitted to the
                      default namespace.
    :param callback: If given, this function will be called to acknowledge
                     that the client has received the message. The
                     arguments that will be passed to the function are
                     those provided by the client. Callback functions can
                     only be used when addressing an individual client.
    :param ignore_queue: Only used when a message queue is configured. If
                         set to ``True``, the event is emitted to the
                         clients directly, without going through the queue.
                         This is more efficient, but only works when a
                         single server process is used. It is recommended
                         to always leave this parameter with its default
                         value of ``False``.
    """
    namespace = namespace or '/'
    # ``to`` and ``room`` are aliases; ``to`` wins when both are given.
    room = to or room
    self.logger.info('emitting event "%s" to %s [%s]', event,
                     room or 'all', namespace)
    # The client manager performs the actual delivery (and fans out to
    # other processes when a message queue is configured).
    self.manager.emit(event, data, namespace, room=room,
                      skip_sid=skip_sid, callback=callback, **kwargs)
|
||||
|
||||
def send(self, data, to=None, room=None, skip_sid=None, namespace=None,
         callback=None, **kwargs):
    """Send a message to one or more connected clients.

    This is a convenience wrapper that emits the standard ``'message'``
    event; every argument is forwarded unchanged to :func:`emit`, so see
    that method for the meaning of ``data``, ``to``, ``room``,
    ``skip_sid``, ``namespace``, ``callback`` and any extra keyword
    arguments such as ``ignore_queue``.
    """
    forwarded = dict(data=data, to=to, room=room, skip_sid=skip_sid,
                     namespace=namespace, callback=callback)
    forwarded.update(kwargs)
    self.emit('message', **forwarded)
|
||||
|
||||
def call(self, event, data=None, to=None, sid=None, namespace=None,
         timeout=60, **kwargs):
    """Emit a custom event to a client and wait for the response.

    :param event: The event name. It can be any string. The event names
                  ``'connect'``, ``'message'`` and ``'disconnect'`` are
                  reserved and should not be used.
    :param data: The data to send to the client or clients. Data can be of
                 type ``str``, ``bytes``, ``list`` or ``dict``. If a
                 ``list`` or ``dict``, the data will be serialized as JSON.
    :param to: The session ID of the recipient client.
    :param sid: Alias for the ``to`` parameter.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the event is emitted to the
                      default namespace.
    :param timeout: The waiting timeout. If the timeout is reached before
                    the client acknowledges the event, then a
                    ``TimeoutError`` exception is raised.
    :param ignore_queue: Only used when a message queue is configured. If
                         set to ``True``, the event is emitted to the
                         client directly, without going through the queue.
                         This is more efficient, but only works when a
                         single server process is used. It is recommended
                         to always leave this parameter with its default
                         value of ``False``.
    """
    if not self.async_handlers:
        # Without async handlers the event handler would run on this same
        # thread/task and the wait below could never be released.
        raise RuntimeError(
            'Cannot use call() when async_handlers is False.')
    callback_event = self.eio.create_event()
    callback_args = []

    # Acknowledgement callback: stores the client's response arguments
    # and wakes up the waiter below.
    def event_callback(*args):
        callback_args.append(args)
        callback_event.set()

    self.emit(event, data=data, room=to or sid, namespace=namespace,
              callback=event_callback, **kwargs)
    if not callback_event.wait(timeout=timeout):
        raise exceptions.TimeoutError()
    # Mirror the ack convention: several values -> tuple, one value -> the
    # value itself, no values -> None.
    return callback_args[0] if len(callback_args[0]) > 1 \
        else callback_args[0][0] if len(callback_args[0]) == 1 \
        else None
|
||||
|
||||
def enter_room(self, sid, room, namespace=None):
    """Add a client to a room.

    Once a client is in a room, the :func:`emit` and :func:`send`
    functions can broadcast events to every client in that room.

    :param sid: Session ID of the client.
    :param room: Room name. If the room does not exist it is created.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the default namespace is used.
    """
    ns = namespace or '/'
    self.logger.info('%s is entering room %s [%s]', sid, room, ns)
    self.manager.enter_room(sid, ns, room)
|
||||
|
||||
def leave_room(self, sid, room, namespace=None):
    """Remove a client from a room.

    :param sid: Session ID of the client.
    :param room: Room name.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the default namespace is used.
    """
    ns = namespace or '/'
    self.logger.info('%s is leaving room %s [%s]', sid, room, ns)
    self.manager.leave_room(sid, ns, room)
|
||||
|
||||
def close_room(self, room, namespace=None):
    """Close a room.

    Every client currently in the given room is removed from it.

    :param room: Room name.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the default namespace is used.
    """
    ns = namespace or '/'
    self.logger.info('room %s is closing [%s]', room, ns)
    self.manager.close_room(room, ns)
|
||||
|
||||
def rooms(self, sid, namespace=None):
    """Return the rooms a client is in.

    :param sid: Session ID of the client.
    :param namespace: The Socket.IO namespace for the event. If this
                      argument is omitted the default namespace is used.
    """
    # Room membership is tracked entirely by the client manager.
    return self.manager.get_rooms(sid, namespace or '/')
|
||||
|
||||
def get_session(self, sid, namespace=None):
    """Return the user session for a client.

    :param sid: The session id of the client.
    :param namespace: The Socket.IO namespace. If this argument is omitted
                      the default namespace is used.

    The return value is a dictionary. Modifications made to this
    dictionary are not guaranteed to be preserved unless
    ``save_session()`` is called, or when the ``session`` context manager
    is used.
    """
    namespace = namespace or '/'
    eio_session = self.eio.get_session(sid)
    # Sessions are stored per namespace inside the Engine.IO session
    # dictionary; an empty one is created on first access.
    return eio_session.setdefault(namespace, {})
|
||||
|
||||
def save_session(self, sid, session, namespace=None):
    """Store the user session for a client.

    :param sid: The session id of the client.
    :param session: The session dictionary.
    :param namespace: The Socket.IO namespace. If this argument is omitted
                      the default namespace is used.
    """
    # Sessions live per namespace inside the Engine.IO session dict.
    self.eio.get_session(sid)[namespace or '/'] = session
|
||||
|
||||
def session(self, sid, namespace=None):
    """Return the user session for a client with context manager syntax.

    :param sid: The session id of the client.
    :param namespace: The Socket.IO namespace. If this argument is omitted
                      the default namespace is used.

    This is a context manager that returns the user session dictionary for
    the client. Any changes that are made to this dictionary inside the
    context manager block are saved back to the session. Example usage::

        @sio.on('connect')
        def on_connect(sid, environ):
            username = authenticate_user(environ)
            if not username:
                return False
            with sio.session(sid) as session:
                session['username'] = username

        @sio.on('message')
        def on_message(sid, msg):
            with sio.session(sid) as session:
                print('received message from ', session['username'])
    """
    class _session_context_manager(object):
        def __init__(self, server, sid, namespace):
            self.server = server
            self.sid = sid
            self.namespace = namespace
            self.session = None

        def __enter__(self):
            # Load the current session; it is written back on exit.
            self.session = self.server.get_session(
                self.sid, namespace=self.namespace)
            return self.session

        def __exit__(self, *args):
            self.server.save_session(self.sid, self.session,
                                     namespace=self.namespace)

    return _session_context_manager(self, sid, namespace)
|
||||
|
||||
def disconnect(self, sid, namespace=None):
    """Disconnect a client.

    :param sid: Session ID of the client.
    :param namespace: The Socket.IO namespace to disconnect. If this
                      argument is omitted the default namespace is used.
    """
    namespace = namespace or '/'
    if self.manager.is_connected(sid, namespace=namespace):
        self.logger.info('Disconnecting %s [%s]', sid, namespace)
        self.manager.pre_disconnect(sid, namespace=namespace)
        # Notify the client, then run the application's disconnect
        # handler before the connection state is removed.
        self._send_packet(sid, packet.Packet(packet.DISCONNECT,
                                             namespace=namespace))
        self._trigger_event('disconnect', namespace, sid)
        self.manager.disconnect(sid, namespace=namespace)
        # Leaving the default namespace also tears down the underlying
        # Engine.IO connection.
        if namespace == '/':
            self.eio.disconnect(sid)
|
||||
|
||||
def transport(self, sid):
    """Return the name of the transport used by the client.

    The two possible values returned by this function are ``'polling'``
    and ``'websocket'``.

    :param sid: The session of the client.
    """
    # Transport negotiation is handled entirely by the Engine.IO layer.
    return self.eio.transport(sid)
|
||||
|
||||
def handle_request(self, environ, start_response):
    """Handle an HTTP request from the client.

    This is the entry point of the Socket.IO application, using the same
    interface as a WSGI application. For the typical usage, this function
    is invoked by the :class:`Middleware` instance, but it can be invoked
    directly when the middleware is not used.

    :param environ: The WSGI environment.
    :param start_response: The WSGI ``start_response`` function.

    This function returns the HTTP response body to deliver to the client
    as a byte sequence.
    """
    # Engine.IO does all the HTTP/transport work; Socket.IO packets come
    # back through the registered message callbacks.
    return self.eio.handle_request(environ, start_response)
|
||||
|
||||
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    This function returns an object compatible with the `Thread` class in
    the Python standard library. The `start()` method on this object is
    already called by this function.
    """
    # Delegate to Engine.IO, which knows the selected async mode.
    return self.eio.start_background_task(target, *args, **kwargs)
|
||||
|
||||
def sleep(self, seconds=0):
    """Sleep for the requested amount of time using the appropriate async
    model.

    This is a utility function that applications can use to put a task to
    sleep without having to worry about using the correct call for the
    selected async mode.

    :param seconds: The sleep interval, in seconds.
    """
    return self.eio.sleep(seconds)
|
||||
|
||||
def _emit_internal(self, sid, event, data, namespace=None, id=None):
    """Send a message to a client.

    :param sid: Session ID of the recipient.
    :param event: Event name, prepended to the packet's data list.
    :param data: Payload; a tuple is expanded into multiple arguments.
    :param namespace: Socket.IO namespace for the packet.
    :param id: Packet id, set when an acknowledgement is expected.
    """
    if six.PY2 and not self.binary:
        # On Python 2 str/bytes are ambiguous, so binary support must be
        # explicitly enabled via the ``binary`` setting.
        binary = False  # pragma: nocover
    else:
        # None leaves the binary decision to the packet encoder.
        binary = None
    # tuples are expanded to multiple arguments, everything else is sent
    # as a single argument
    if isinstance(data, tuple):
        data = list(data)
    else:
        data = [data]
    self._send_packet(sid, packet.Packet(packet.EVENT, namespace=namespace,
                                         data=[event] + data, id=id,
                                         binary=binary))
|
||||
|
||||
def _send_packet(self, sid, pkt):
    """Send a Socket.IO packet to a client."""
    encoded_packet = pkt.encode()
    if isinstance(encoded_packet, list):
        # A list encoding indicates binary attachments: the first item
        # is the text header, the remaining items are sent as binary
        # Engine.IO messages.
        binary = False
        for ep in encoded_packet:
            self.eio.send(sid, ep, binary=binary)
            binary = True
    else:
        self.eio.send(sid, encoded_packet, binary=False)
|
||||
|
||||
def _handle_connect(self, sid, namespace):
    """Handle a client connection request.

    Runs the application's ``connect`` handler (if any) and either
    accepts the connection with a CONNECT packet or rejects it. With
    ``always_connect`` the CONNECT is sent optimistically up front and a
    rejection is later delivered as a DISCONNECT; otherwise the CONNECT
    (or an ERROR on rejection) is sent after the handler runs.
    """
    namespace = namespace or '/'
    self.manager.connect(sid, namespace)
    if self.always_connect:
        self._send_packet(sid, packet.Packet(packet.CONNECT,
                                             namespace=namespace))
    fail_reason = None
    try:
        success = self._trigger_event('connect', namespace, sid,
                                      self.environ[sid])
    except exceptions.ConnectionRefusedError as exc:
        # The handler rejected the connection with an explicit reason,
        # which is forwarded to the client below.
        fail_reason = exc.error_args
        success = False

    if success is False:
        if self.always_connect:
            # CONNECT was already sent, so the rejection must be
            # delivered as a DISCONNECT instead of an ERROR.
            self.manager.pre_disconnect(sid, namespace)
            self._send_packet(sid, packet.Packet(
                packet.DISCONNECT, data=fail_reason, namespace=namespace))
        self.manager.disconnect(sid, namespace)
        if not self.always_connect:
            self._send_packet(sid, packet.Packet(
                packet.ERROR, data=fail_reason, namespace=namespace))
        if sid in self.environ:  # pragma: no cover
            del self.environ[sid]
    elif not self.always_connect:
        self._send_packet(sid, packet.Packet(packet.CONNECT,
                                             namespace=namespace))
|
||||
|
||||
def _handle_disconnect(self, sid, namespace):
    """Handle a client disconnect.

    A disconnect on the default namespace disconnects the client from
    every namespace it is connected to; otherwise only the given
    namespace is affected.
    """
    namespace = namespace or '/'
    if namespace == '/':
        namespace_list = list(self.manager.get_namespaces())
    else:
        namespace_list = [namespace]
    for n in namespace_list:
        if n != '/' and self.manager.is_connected(sid, n):
            self._trigger_event('disconnect', n, sid)
            self.manager.disconnect(sid, n)
    # The default namespace is processed last, after all the others.
    if namespace == '/' and self.manager.is_connected(sid, namespace):
        self._trigger_event('disconnect', '/', sid)
        self.manager.disconnect(sid, '/')
|
||||
|
||||
def _handle_event(self, sid, namespace, id, data):
    """Handle an incoming client event.

    ``data[0]`` is the event name; the remaining items are passed to the
    handler as arguments. ``id`` is the ack id, or ``None`` when the
    client does not expect an acknowledgement.
    """
    namespace = namespace or '/'
    self.logger.info('received event "%s" from %s [%s]', data[0], sid,
                     namespace)
    if not self.manager.is_connected(sid, namespace):
        self.logger.warning('%s is not connected to namespace %s',
                            sid, namespace)
        return
    if self.async_handlers:
        # Run the handler in a background task so a slow handler does
        # not block the server loop.
        self.start_background_task(self._handle_event_internal, self, sid,
                                   data, namespace, id)
    else:
        self._handle_event_internal(self, sid, data, namespace, id)
|
||||
|
||||
def _handle_event_internal(self, server, sid, data, namespace, id):
    # ``server`` is the same object as ``self``; it is passed explicitly
    # because this function is also invoked via start_background_task().
    r = server._trigger_event(data[0], namespace, sid, *data[1:])
    if id is not None:
        # send ACK packet with the response returned by the handler
        # tuples are expanded as multiple arguments
        if r is None:
            data = []
        elif isinstance(r, tuple):
            data = list(r)
        else:
            data = [r]
        if six.PY2 and not self.binary:
            binary = False  # pragma: nocover
        else:
            binary = None
        server._send_packet(sid, packet.Packet(packet.ACK,
                                               namespace=namespace,
                                               id=id, data=data,
                                               binary=binary))
|
||||
|
||||
def _handle_ack(self, sid, namespace, id, data):
    """Handle ACK packets from the client."""
    namespace = namespace or '/'
    self.logger.info('received ack from %s [%s]', sid, namespace)
    # Resolve the callback registered when the event with this id was
    # emitted.
    self.manager.trigger_callback(sid, namespace, id, data)
|
||||
|
||||
def _trigger_event(self, event, namespace, *args):
|
||||
"""Invoke an application event handler."""
|
||||
# first see if we have an explicit handler for the event
|
||||
if namespace in self.handlers and event in self.handlers[namespace]:
|
||||
return self.handlers[namespace][event](*args)
|
||||
|
||||
# or else, forward the event to a namespace handler if one exists
|
||||
elif namespace in self.namespace_handlers:
|
||||
return self.namespace_handlers[namespace].trigger_event(
|
||||
event, *args)
|
||||
|
||||
def _handle_eio_connect(self, sid, environ):
    """Handle the Engine.IO connection event."""
    if not self.manager_initialized:
        # Deferred initialization: the client manager is started on the
        # first connection instead of at server construction time.
        self.manager_initialized = True
        self.manager.initialize()
    # Keep the environ so the application's connect handler can see it.
    self.environ[sid] = environ
    return self._handle_connect(sid, '/')
|
||||
|
||||
def _handle_eio_message(self, sid, data):
    """Dispatch Engine.IO messages."""
    if sid in self._binary_packet:
        # A previous packet from this client is waiting for binary
        # attachments; feed this message to it and dispatch once all
        # attachments have arrived.
        pkt = self._binary_packet[sid]
        if pkt.add_attachment(data):
            del self._binary_packet[sid]
            if pkt.packet_type == packet.BINARY_EVENT:
                self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
            else:
                self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
    else:
        pkt = packet.Packet(encoded_packet=data)
        if pkt.packet_type == packet.CONNECT:
            self._handle_connect(sid, pkt.namespace)
        elif pkt.packet_type == packet.DISCONNECT:
            self._handle_disconnect(sid, pkt.namespace)
        elif pkt.packet_type == packet.EVENT:
            self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
        elif pkt.packet_type == packet.ACK:
            self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
        elif pkt.packet_type == packet.BINARY_EVENT or \
                pkt.packet_type == packet.BINARY_ACK:
            # Stash the packet until its binary attachments arrive.
            self._binary_packet[sid] = pkt
        elif pkt.packet_type == packet.ERROR:
            raise ValueError('Unexpected ERROR packet.')
        else:
            raise ValueError('Unknown packet type.')
|
||||
|
||||
def _handle_eio_disconnect(self, sid):
|
||||
"""Handle Engine.IO disconnect event."""
|
||||
self._handle_disconnect(sid, '/')
|
||||
if sid in self.environ:
|
||||
del self.environ[sid]
|
||||
|
||||
def _engineio_server_class(self):
    # Hook overridden by async subclasses to supply their own Engine.IO
    # server class.
    return engineio.Server
|
@ -0,0 +1,11 @@
|
||||
import sys
|
||||
if sys.version_info >= (3, 5):
|
||||
try:
|
||||
from engineio.async_drivers.tornado import get_tornado_handler as \
|
||||
get_engineio_handler
|
||||
except ImportError: # pragma: no cover
|
||||
get_engineio_handler = None
|
||||
|
||||
|
||||
def get_tornado_handler(socketio_server):  # pragma: no cover
    """Return a Tornado request handler for the given Socket.IO server.

    Delegates to the Engine.IO tornado driver. ``get_engineio_handler``
    is ``None`` when the driver could not be imported, in which case this
    call fails with a ``TypeError``.
    """
    return get_engineio_handler(socketio_server.eio)
|
@ -0,0 +1,111 @@
|
||||
import pickle
|
||||
import re
|
||||
|
||||
try:
|
||||
import eventlet.green.zmq as zmq
|
||||
except ImportError:
|
||||
zmq = None
|
||||
import six
|
||||
|
||||
from .pubsub_manager import PubSubManager
|
||||
|
||||
|
||||
class ZmqManager(PubSubManager):  # pragma: no cover
    """zmq based client manager.

    NOTE: this zmq implementation should be considered experimental at this
    time. At this time, eventlet is required to use zmq.

    This class implements a zmq backend for event sharing across multiple
    processes. To use a zmq backend, initialize the :class:`Server` instance as
    follows::

        url = 'zmq+tcp://hostname:port1+port2'
        server = socketio.Server(client_manager=socketio.ZmqManager(url))

    :param url: The connection URL for the zmq message broker,
                which will need to be provided and running.
    :param channel: The channel name on which the server sends and receives
                    notifications. Must be the same in all the servers.
    :param write_only: If set to ``True``, only initialize to emit events. The
                       default of ``False`` initializes the class for emitting
                       and receiving.

    A zmq message broker must be running for the zmq_manager to work.
    You can write your own or adapt one from the following simple broker
    below::

        import zmq

        receiver = zmq.Context().socket(zmq.PULL)
        receiver.bind("tcp://*:5555")

        publisher = zmq.Context().socket(zmq.PUB)
        publisher.bind("tcp://*:5556")

        while True:
            publisher.send(receiver.recv())
    """
    name = 'zmq'

    def __init__(self, url='zmq+tcp://localhost:5555+5556',
                 channel='socketio',
                 write_only=False,
                 logger=None):
        if zmq is None:
            raise RuntimeError('zmq package is not installed '
                               '(Run "pip install pyzmq" in your '
                               'virtualenv).')

        # The URL must end with ':<sink_port>+<sub_port>'.
        r = re.compile(r':\d+\+\d+$')
        if not (url.startswith('zmq+tcp://') and r.search(url)):
            raise RuntimeError('unexpected connection string: ' + url)

        url = url.replace('zmq+', '')
        (sink_url, sub_port) = url.split('+')
        # Split the port off from the right, so that digits appearing in
        # the host part (e.g. in an IP address) cannot be mangled, as a
        # plain str.replace() of the port number could do.
        sink_host, _, sink_port = sink_url.rpartition(':')
        sub_url = sink_host + ':' + sub_port

        # Events are pushed to the broker's PULL socket...
        sink = zmq.Context().socket(zmq.PUSH)
        sink.connect(sink_url)

        # ...and received back from its PUB socket (subscribe to all).
        sub = zmq.Context().socket(zmq.SUB)
        sub.setsockopt_string(zmq.SUBSCRIBE, u'')
        sub.connect(sub_url)

        self.sink = sink
        self.sub = sub
        self.channel = channel
        super(ZmqManager, self).__init__(channel=channel,
                                         write_only=write_only,
                                         logger=logger)

    def _publish(self, data):
        """Pickle and push an event envelope to the broker."""
        # The envelope mirrors the pubsub message format checked by
        # _listen() below.
        pickled_data = pickle.dumps(
            {
                'type': 'message',
                'channel': self.channel,
                'data': data
            }
        )
        return self.sink.send(pickled_data)

    def zmq_listen(self):
        """Yield raw messages received on the subscriber socket."""
        while True:
            response = self.sub.recv()
            if response is not None:
                yield response

    def _listen(self):
        """Yield the payloads of valid messages for this channel."""
        for message in self.zmq_listen():
            if isinstance(message, six.binary_type):
                try:
                    message = pickle.loads(message)
                except Exception:
                    # Not a pickle payload; the checks below discard it.
                    pass
            # Use .get() so malformed dictionaries are skipped instead of
            # raising KeyError.
            if isinstance(message, dict) and \
                    message.get('type') == 'message' and \
                    message.get('channel') == self.channel and \
                    'data' in message:
                yield message['data']
|
File diff suppressed because one or more lines are too long
Loading…
Reference in new issue