fix

This commit is contained in:
Egor Matveev
2024-12-28 22:48:16 +03:00
parent c1249bfcd0
commit 6c6a549aff
2532 changed files with 562109 additions and 1 deletion


@@ -0,0 +1,64 @@
from redis.asyncio.client import Redis, StrictRedis
from redis.asyncio.cluster import RedisCluster
from redis.asyncio.connection import (
BlockingConnectionPool,
Connection,
ConnectionPool,
SSLConnection,
UnixDomainSocketConnection,
)
from redis.asyncio.sentinel import (
Sentinel,
SentinelConnectionPool,
SentinelManagedConnection,
SentinelManagedSSLConnection,
)
from redis.asyncio.utils import from_url
from redis.backoff import default_backoff
from redis.exceptions import (
AuthenticationError,
AuthenticationWrongNumberOfArgsError,
BusyLoadingError,
ChildDeadlockedError,
ConnectionError,
DataError,
InvalidResponse,
OutOfMemoryError,
PubSubError,
ReadOnlyError,
RedisError,
ResponseError,
TimeoutError,
WatchError,
)
__all__ = [
"AuthenticationError",
"AuthenticationWrongNumberOfArgsError",
"BlockingConnectionPool",
"BusyLoadingError",
"ChildDeadlockedError",
"Connection",
"ConnectionError",
"ConnectionPool",
"DataError",
"from_url",
"default_backoff",
"InvalidResponse",
"PubSubError",
"OutOfMemoryError",
"ReadOnlyError",
"Redis",
"RedisCluster",
"RedisError",
"ResponseError",
"Sentinel",
"SentinelConnectionPool",
"SentinelManagedConnection",
"SentinelManagedSSLConnection",
"SSLConnection",
"StrictRedis",
"TimeoutError",
"UnixDomainSocketConnection",
"WatchError",
]
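
A minimal usage sketch of the re-exported asyncio client (not part of this commit), assuming a Redis server is reachable at localhost:6379:

import asyncio

from redis.asyncio import Redis, from_url


async def main() -> None:
    client: Redis = from_url("redis://localhost:6379/0")
    await client.set("greeting", "hello")
    print(await client.get("greeting"))  # b'hello' unless decode_responses=True
    await client.aclose()  # aclose() assumes redis-py >= 5.0; older versions use close()


asyncio.run(main())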

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,313 @@
import asyncio
import threading
import uuid
from types import SimpleNamespace
from typing import TYPE_CHECKING, Awaitable, Optional, Union
from redis.exceptions import LockError, LockNotOwnedError
if TYPE_CHECKING:
from redis.asyncio import Redis, RedisCluster
class Lock:
"""
A shared, distributed Lock. Using Redis for locking allows the Lock
to be shared across processes and/or machines.
It's left to the user to resolve deadlock issues and make sure
multiple clients play nicely together.
"""
lua_release = None
lua_extend = None
lua_reacquire = None
# KEYS[1] - lock name
# ARGV[1] - token
# return 1 if the lock was released, otherwise 0
LUA_RELEASE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('del', KEYS[1])
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - additional milliseconds
# ARGV[3] - "0" if the additional time should be added to the lock's
# existing ttl or "1" if the existing ttl should be replaced
# return 1 if the lock's time was extended, otherwise 0
LUA_EXTEND_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
local expiration = redis.call('pttl', KEYS[1])
if not expiration then
expiration = 0
end
if expiration < 0 then
return 0
end
local newttl = ARGV[2]
if ARGV[3] == "0" then
newttl = ARGV[2] + expiration
end
redis.call('pexpire', KEYS[1], newttl)
return 1
"""
# KEYS[1] - lock name
# ARGV[1] - token
# ARGV[2] - milliseconds
# return 1 if the lock's time was reacquired, otherwise 0
LUA_REACQUIRE_SCRIPT = """
local token = redis.call('get', KEYS[1])
if not token or token ~= ARGV[1] then
return 0
end
redis.call('pexpire', KEYS[1], ARGV[2])
return 1
"""
def __init__(
self,
redis: Union["Redis", "RedisCluster"],
name: Union[str, bytes, memoryview],
timeout: Optional[float] = None,
sleep: float = 0.1,
blocking: bool = True,
blocking_timeout: Optional[float] = None,
thread_local: bool = True,
):
"""
Create a new Lock instance named ``name`` using the Redis client
supplied by ``redis``.
``timeout`` indicates a maximum life for the lock in seconds.
By default, it will remain locked until release() is called.
``timeout`` can be specified as a float or integer, both representing
the number of seconds to wait.
``sleep`` indicates the amount of time to sleep in seconds per loop
iteration when the lock is in blocking mode and another client is
currently holding the lock.
``blocking`` indicates whether calling ``acquire`` should block until
the lock has been acquired or fail immediately, causing ``acquire``
to return False without the lock being acquired. Defaults to True.
Note this value can be overridden by passing a ``blocking``
argument to ``acquire``.
``blocking_timeout`` indicates the maximum amount of time in seconds to
spend trying to acquire the lock. A value of ``None`` means try
forever. ``blocking_timeout`` can be specified as a
float or integer, both representing the number of seconds to wait.
``thread_local`` indicates whether the lock token is placed in
thread-local storage. By default, the token is placed in thread local
storage so that a thread only sees its token, not a token set by
another thread. Consider the following timeline:
time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
thread-1 sets the token to "abc"
time: 1, thread-2 blocks trying to acquire `my-lock` using the
Lock instance.
time: 5, thread-1 has not yet completed. redis expires the lock
key.
time: 5, thread-2 acquired `my-lock` now that it's available.
thread-2 sets the token to "xyz"
time: 6, thread-1 finishes its work and calls release(). if the
token is *not* stored in thread local storage, then
thread-1 would see the token value as "xyz" and would be
able to successfully release thread-2's lock.
In some use cases it's necessary to disable thread local storage. For
example, if you have code where one thread acquires a lock and passes
that lock instance to a worker thread to release later. If thread
local storage isn't disabled in this case, the worker thread won't see
the token set by the thread that acquired the lock. Our assumption
is that these cases aren't common and as such default to using
thread local storage.
"""
self.redis = redis
self.name = name
self.timeout = timeout
self.sleep = sleep
self.blocking = blocking
self.blocking_timeout = blocking_timeout
self.thread_local = bool(thread_local)
self.local = threading.local() if self.thread_local else SimpleNamespace()
self.local.token = None
self.register_scripts()
def register_scripts(self):
cls = self.__class__
client = self.redis
if cls.lua_release is None:
cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
if cls.lua_extend is None:
cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
if cls.lua_reacquire is None:
cls.lua_reacquire = client.register_script(cls.LUA_REACQUIRE_SCRIPT)
async def __aenter__(self):
if await self.acquire():
return self
raise LockError("Unable to acquire lock within the time specified")
async def __aexit__(self, exc_type, exc_value, traceback):
await self.release()
async def acquire(
self,
blocking: Optional[bool] = None,
blocking_timeout: Optional[float] = None,
token: Optional[Union[str, bytes]] = None,
):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
``blocking_timeout`` specifies the maximum number of seconds to
wait trying to acquire the lock.
``token`` specifies the token value to be used. If provided, token
must be a bytes object or a string that can be encoded to a bytes
object with the default encoding. If a token isn't specified, a UUID
will be generated.
"""
sleep = self.sleep
if token is None:
token = uuid.uuid1().hex.encode()
else:
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
token = encoder.encode(token)
if blocking is None:
blocking = self.blocking
if blocking_timeout is None:
blocking_timeout = self.blocking_timeout
stop_trying_at = None
if blocking_timeout is not None:
stop_trying_at = asyncio.get_running_loop().time() + blocking_timeout
while True:
if await self.do_acquire(token):
self.local.token = token
return True
if not blocking:
return False
next_try_at = asyncio.get_running_loop().time() + sleep
if stop_trying_at is not None and next_try_at > stop_trying_at:
return False
await asyncio.sleep(sleep)
async def do_acquire(self, token: Union[str, bytes]) -> bool:
if self.timeout:
# convert to milliseconds
timeout = int(self.timeout * 1000)
else:
timeout = None
return bool(await self.redis.set(self.name, token, nx=True, px=timeout))
async def locked(self) -> bool:
"""
Returns True if this key is locked by any process, otherwise False.
"""
return await self.redis.get(self.name) is not None
async def owned(self) -> bool:
"""
Returns True if this key is locked by this lock, otherwise False.
"""
stored_token = await self.redis.get(self.name)
# need to always compare bytes to bytes
# TODO: this can be simplified when the context manager is finished
if stored_token and not isinstance(stored_token, bytes):
try:
encoder = self.redis.connection_pool.get_encoder()
except AttributeError:
# Cluster
encoder = self.redis.get_encoder()
stored_token = encoder.encode(stored_token)
return self.local.token is not None and stored_token == self.local.token
def release(self) -> Awaitable[None]:
"""Releases the already acquired lock"""
expected_token = self.local.token
if expected_token is None:
raise LockError("Cannot release an unlocked lock")
self.local.token = None
return self.do_release(expected_token)
async def do_release(self, expected_token: bytes) -> None:
if not bool(
await self.lua_release(
keys=[self.name], args=[expected_token], client=self.redis
)
):
raise LockNotOwnedError("Cannot release a lock that's no longer owned")
def extend(
self, additional_time: float, replace_ttl: bool = False
) -> Awaitable[bool]:
"""
Adds more time to an already acquired lock.
``additional_time`` can be specified as an integer or a float, both
representing the number of seconds to add.
``replace_ttl`` if False (the default), add `additional_time` to
the lock's existing ttl. If True, replace the lock's ttl with
`additional_time`.
"""
if self.local.token is None:
raise LockError("Cannot extend an unlocked lock")
if self.timeout is None:
raise LockError("Cannot extend a lock with no timeout")
return self.do_extend(additional_time, replace_ttl)
async def do_extend(self, additional_time, replace_ttl) -> bool:
additional_time = int(additional_time * 1000)
if not bool(
await self.lua_extend(
keys=[self.name],
args=[self.local.token, additional_time, "1" if replace_ttl else "0"],
client=self.redis,
)
):
raise LockNotOwnedError("Cannot extend a lock that's no longer owned")
return True
def reacquire(self) -> Awaitable[bool]:
"""
Resets the TTL of an already acquired lock back to the ``timeout`` value.
"""
if self.local.token is None:
raise LockError("Cannot reacquire an unlocked lock")
if self.timeout is None:
raise LockError("Cannot reacquire a lock with no timeout")
return self.do_reacquire()
async def do_reacquire(self) -> bool:
timeout = int(self.timeout * 1000)
if not bool(
await self.lua_reacquire(
keys=[self.name], args=[self.local.token, timeout], client=self.redis
)
):
raise LockNotOwnedError("Cannot reacquire a lock that's no longer owned")
return True
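
A minimal sketch of using this Lock as an async context manager (not part of this commit); the key name and timings are illustrative, and client.lock() is the standard redis-py helper that constructs a Lock like the one above:

import asyncio

from redis.asyncio import Redis


async def main() -> None:
    client = Redis()
    # Hold "my-lock" for at most 10 seconds; wait up to 5 seconds to acquire it.
    async with client.lock("my-lock", timeout=10, blocking_timeout=5) as lock:
        await lock.extend(10)  # add 10 more seconds to the existing TTL
    await client.aclose()


asyncio.run(main())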


@@ -0,0 +1,67 @@
from asyncio import sleep
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Tuple, Type, TypeVar
from redis.exceptions import ConnectionError, RedisError, TimeoutError
if TYPE_CHECKING:
from redis.backoff import AbstractBackoff
T = TypeVar("T")
class Retry:
"""Retry a specific number of times after a failure"""
__slots__ = "_backoff", "_retries", "_supported_errors"
def __init__(
self,
backoff: "AbstractBackoff",
retries: int,
supported_errors: Tuple[Type[RedisError], ...] = (
ConnectionError,
TimeoutError,
),
):
"""
Initialize a `Retry` object with a `Backoff` object
that retries a maximum of `retries` times.
`retries` can be negative to retry forever.
You can specify the types of supported errors which trigger
a retry with the `supported_errors` parameter.
"""
self._backoff = backoff
self._retries = retries
self._supported_errors = supported_errors
def update_supported_errors(self, specified_errors: list):
"""
Updates the supported errors with the specified error types
"""
self._supported_errors = tuple(
set(self._supported_errors + tuple(specified_errors))
)
async def call_with_retry(
self, do: Callable[[], Awaitable[T]], fail: Callable[[RedisError], Any]
) -> T:
"""
Execute an operation that might fail and return its result, or
raise the exception that was thrown, depending on the `Backoff` object.
`do`: the operation to call. Expects no argument.
`fail`: the failure handler, expects the last error that was thrown
"""
self._backoff.reset()
failures = 0
while True:
try:
return await do()
except self._supported_errors as error:
failures += 1
await fail(error)
if self._retries >= 0 and failures > self._retries:
raise error
backoff = self._backoff.compute(failures)
if backoff > 0:
await sleep(backoff)
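
A minimal sketch of call_with_retry (not part of this commit). It assumes this module is importable as redis.asyncio.retry; flaky() and its failure mode are illustrative, and ExponentialBackoff comes from redis.backoff:

import asyncio

from redis.asyncio.retry import Retry
from redis.backoff import ExponentialBackoff
from redis.exceptions import ConnectionError

attempts = 0


async def flaky() -> str:
    # Fails twice with a retryable error, then succeeds.
    global attempts
    attempts += 1
    if attempts < 3:
        raise ConnectionError("transient failure")
    return "ok"


async def log_failure(error: Exception) -> None:
    print(f"retrying after {error!r}")


async def main() -> None:
    retry = Retry(ExponentialBackoff(), retries=5)
    print(await retry.call_with_retry(flaky, log_failure))  # "ok" on the 3rd try


asyncio.run(main())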


@@ -0,0 +1,383 @@
import asyncio
import random
import weakref
from typing import AsyncIterator, Iterable, Mapping, Optional, Sequence, Tuple, Type
from redis.asyncio.client import Redis
from redis.asyncio.connection import (
Connection,
ConnectionPool,
EncodableT,
SSLConnection,
)
from redis.commands import AsyncSentinelCommands
from redis.exceptions import ConnectionError, ReadOnlyError, ResponseError, TimeoutError
from redis.utils import str_if_bytes
class MasterNotFoundError(ConnectionError):
pass
class SlaveNotFoundError(ConnectionError):
pass
class SentinelManagedConnection(Connection):
def __init__(self, **kwargs):
self.connection_pool = kwargs.pop("connection_pool")
super().__init__(**kwargs)
def __repr__(self):
pool = self.connection_pool
s = (
f"<{self.__class__.__module__}.{self.__class__.__name__}"
f"(service={pool.service_name}"
)
if self.host:
host_info = f",host={self.host},port={self.port}"
s += host_info
return s + ")>"
async def connect_to(self, address):
self.host, self.port = address
await super().connect()
if self.connection_pool.check_connection:
await self.send_command("PING")
if str_if_bytes(await self.read_response()) != "PONG":
raise ConnectionError("PING failed")
async def _connect_retry(self):
if self._reader:
return # already connected
if self.connection_pool.is_master:
await self.connect_to(await self.connection_pool.get_master_address())
else:
async for slave in self.connection_pool.rotate_slaves():
try:
return await self.connect_to(slave)
except ConnectionError:
continue
raise SlaveNotFoundError  # unreachable; rotate_slaves always yields or raises
async def connect(self):
return await self.retry.call_with_retry(
self._connect_retry,
lambda error: asyncio.sleep(0),
)
async def read_response(
self,
disable_decoding: bool = False,
timeout: Optional[float] = None,
*,
disconnect_on_error: Optional[bool] = True,
push_request: Optional[bool] = False,
):
try:
return await super().read_response(
disable_decoding=disable_decoding,
timeout=timeout,
disconnect_on_error=disconnect_on_error,
push_request=push_request,
)
except ReadOnlyError:
if self.connection_pool.is_master:
# When talking to a master, a ReadOnlyError likely indicates
# that the previous master we're still connected to has been
# demoted to a slave and there's a new master. Calling
# disconnect() forces the connection to re-query sentinel
# during the next connect() attempt.
await self.disconnect()
raise ConnectionError("The previous master is now a slave")
raise
class SentinelManagedSSLConnection(SentinelManagedConnection, SSLConnection):
pass
class SentinelConnectionPool(ConnectionPool):
"""
Sentinel backed connection pool.
If the ``check_connection`` flag is set to True, SentinelManagedConnection
sends a PING command right after establishing the connection.
"""
def __init__(self, service_name, sentinel_manager, **kwargs):
kwargs["connection_class"] = kwargs.get(
"connection_class",
(
SentinelManagedSSLConnection
if kwargs.pop("ssl", False)
else SentinelManagedConnection
),
)
self.is_master = kwargs.pop("is_master", True)
self.check_connection = kwargs.pop("check_connection", False)
super().__init__(**kwargs)
self.connection_kwargs["connection_pool"] = weakref.proxy(self)
self.service_name = service_name
self.sentinel_manager = sentinel_manager
self.master_address = None
self.slave_rr_counter = None
def __repr__(self):
return (
f"<{self.__class__.__module__}.{self.__class__.__name__}"
f"(service={self.service_name}({self.is_master and 'master' or 'slave'}))>"
)
def reset(self):
super().reset()
self.master_address = None
self.slave_rr_counter = None
def owns_connection(self, connection: Connection):
check = not self.is_master or (
self.is_master and self.master_address == (connection.host, connection.port)
)
return check and super().owns_connection(connection)
async def get_master_address(self):
master_address = await self.sentinel_manager.discover_master(self.service_name)
if self.is_master:
if self.master_address != master_address:
self.master_address = master_address
# disconnect any idle connections so that they reconnect
# to the new master the next time that they are used.
await self.disconnect(inuse_connections=False)
return master_address
async def rotate_slaves(self) -> AsyncIterator:
"""Round-robin slave balancer"""
slaves = await self.sentinel_manager.discover_slaves(self.service_name)
if slaves:
if self.slave_rr_counter is None:
self.slave_rr_counter = random.randint(0, len(slaves) - 1)
for _ in range(len(slaves)):
self.slave_rr_counter = (self.slave_rr_counter + 1) % len(slaves)
slave = slaves[self.slave_rr_counter]
yield slave
# Fallback to the master connection
try:
yield await self.get_master_address()
except MasterNotFoundError:
pass
raise SlaveNotFoundError(f"No slave found for {self.service_name!r}")
class Sentinel(AsyncSentinelCommands):
"""
Redis Sentinel cluster client
>>> from redis.sentinel import Sentinel
>>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
>>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
>>> await master.set('foo', 'bar')
>>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
>>> await slave.get('foo')
b'bar'
``sentinels`` is a list of sentinel nodes. Each node is represented by
a pair (hostname, port).
``min_other_sentinels`` defines a minimum number of peers for a sentinel.
When querying a sentinel, if it doesn't meet this threshold, responses
from that sentinel won't be considered valid.
``sentinel_kwargs`` is a dictionary of connection arguments used when
connecting to sentinel instances. Any argument that can be passed to
a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
not specified, any socket_timeout and socket_keepalive options specified
in ``connection_kwargs`` will be used.
``connection_kwargs`` are keyword arguments that will be used when
establishing a connection to a Redis server.
"""
def __init__(
self,
sentinels,
min_other_sentinels=0,
sentinel_kwargs=None,
**connection_kwargs,
):
# if sentinel_kwargs isn't defined, use the socket_* options from
# connection_kwargs
if sentinel_kwargs is None:
sentinel_kwargs = {
k: v for k, v in connection_kwargs.items() if k.startswith("socket_")
}
self.sentinel_kwargs = sentinel_kwargs
self.sentinels = [
Redis(host=hostname, port=port, **self.sentinel_kwargs)
for hostname, port in sentinels
]
self.min_other_sentinels = min_other_sentinels
self.connection_kwargs = connection_kwargs
async def execute_command(self, *args, **kwargs):
"""
Execute Sentinel command in sentinel nodes.
once - If set to True, then execute the resulting command on a single
node at random, rather than across the entire sentinel cluster.
"""
once = bool(kwargs.pop("once", False))
if once:
await random.choice(self.sentinels).execute_command(*args, **kwargs)
else:
tasks = [
asyncio.Task(sentinel.execute_command(*args, **kwargs))
for sentinel in self.sentinels
]
await asyncio.gather(*tasks)
return True
def __repr__(self):
sentinel_addresses = []
for sentinel in self.sentinels:
sentinel_addresses.append(
f"{sentinel.connection_pool.connection_kwargs['host']}:"
f"{sentinel.connection_pool.connection_kwargs['port']}"
)
return (
f"<{self.__class__}.{self.__class__.__name__}"
f"(sentinels=[{','.join(sentinel_addresses)}])>"
)
def check_master_state(self, state: dict, service_name: str) -> bool:
if not state["is_master"] or state["is_sdown"] or state["is_odown"]:
return False
# Check that this sentinel sees enough other sentinels
if state["num-other-sentinels"] < self.min_other_sentinels:
return False
return True
async def discover_master(self, service_name: str):
"""
Asks sentinel servers for the Redis master's address corresponding
to the service labeled ``service_name``.
Returns a pair (address, port) or raises MasterNotFoundError if no
master is found.
"""
collected_errors = list()
for sentinel_no, sentinel in enumerate(self.sentinels):
try:
masters = await sentinel.sentinel_masters()
except (ConnectionError, TimeoutError) as e:
collected_errors.append(f"{sentinel} - {e!r}")
continue
state = masters.get(service_name)
if state and self.check_master_state(state, service_name):
# Put this sentinel at the top of the list
self.sentinels[0], self.sentinels[sentinel_no] = (
sentinel,
self.sentinels[0],
)
return state["ip"], state["port"]
error_info = ""
if len(collected_errors) > 0:
error_info = f" : {', '.join(collected_errors)}"
raise MasterNotFoundError(f"No master found for {service_name!r}{error_info}")
def filter_slaves(
self, slaves: Iterable[Mapping]
) -> Sequence[Tuple[EncodableT, EncodableT]]:
"""Remove slaves that are in an ODOWN or SDOWN state"""
slaves_alive = []
for slave in slaves:
if slave["is_odown"] or slave["is_sdown"]:
continue
slaves_alive.append((slave["ip"], slave["port"]))
return slaves_alive
async def discover_slaves(
self, service_name: str
) -> Sequence[Tuple[EncodableT, EncodableT]]:
"""Returns a list of alive slaves for service ``service_name``"""
for sentinel in self.sentinels:
try:
slaves = await sentinel.sentinel_slaves(service_name)
except (ConnectionError, ResponseError, TimeoutError):
continue
slaves = self.filter_slaves(slaves)
if slaves:
return slaves
return []
def master_for(
self,
service_name: str,
redis_class: Type[Redis] = Redis,
connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool,
**kwargs,
):
"""
Returns a redis client instance for the ``service_name`` master.
A :py:class:`~redis.sentinel.SentinelConnectionPool` class is
used to retrieve the master's address before establishing a new
connection.
NOTE: If the master's address has changed, any cached connections to
the old master are closed.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to
use. The :py:class:`~redis.sentinel.SentinelConnectionPool`
will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = True
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
connection_pool = connection_pool_class(service_name, self, **connection_kwargs)
# The Redis object "owns" the pool
return redis_class.from_pool(connection_pool)
def slave_for(
self,
service_name: str,
redis_class: Type[Redis] = Redis,
connection_pool_class: Type[SentinelConnectionPool] = SentinelConnectionPool,
**kwargs,
):
"""
Returns a redis client instance for the ``service_name`` slave(s).
A SentinelConnectionPool class is used to retrieve the slave's
address before establishing a new connection.
By default clients will be a :py:class:`~redis.Redis` instance.
Specify a different class to the ``redis_class`` argument if you
desire something different.
The ``connection_pool_class`` specifies the connection pool to use.
The SentinelConnectionPool will be used by default.
All other keyword arguments are merged with any connection_kwargs
passed to this class and passed to the connection pool as keyword
arguments to be used to initialize Redis connections.
"""
kwargs["is_master"] = False
connection_kwargs = dict(self.connection_kwargs)
connection_kwargs.update(kwargs)
connection_pool = connection_pool_class(service_name, self, **connection_kwargs)
# The Redis object "owns" the pool
return redis_class.from_pool(connection_pool)
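
A minimal end-to-end sketch of the Sentinel client (not part of this commit), mirroring the doctest in the class docstring; the sentinel address and the "mymaster" service name are illustrative:

import asyncio

from redis.asyncio.sentinel import Sentinel


async def main() -> None:
    sentinel = Sentinel([("localhost", 26379)], socket_timeout=0.5)
    # Writes go to the current master, resolved via the sentinels.
    master = sentinel.master_for("mymaster", socket_timeout=0.5)
    await master.set("foo", "bar")
    # Reads rotate across healthy replicas, falling back to the master.
    replica = sentinel.slave_for("mymaster", socket_timeout=0.5)
    print(await replica.get("foo"))  # b'bar'


asyncio.run(main())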


@@ -0,0 +1,28 @@
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from redis.asyncio.client import Pipeline, Redis
def from_url(url, **kwargs):
"""
Returns an active Redis client generated from the given database URL.
Will attempt to extract the database id from the URL path if
none is provided.
"""
from redis.asyncio.client import Redis
return Redis.from_url(url, **kwargs)
class pipeline:
def __init__(self, redis_obj: "Redis"):
self.p: "Pipeline" = redis_obj.pipeline()
async def __aenter__(self) -> "Pipeline":
return self.p
async def __aexit__(self, exc_type, exc_value, traceback):
await self.p.execute()
del self.p
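
A minimal sketch of the pipeline helper above (not part of this commit): commands queued inside the block are executed in one batch on context exit. Assumes a Redis server at localhost:6379:

import asyncio

from redis.asyncio.utils import from_url, pipeline


async def main() -> None:
    client = from_url("redis://localhost:6379/0")
    async with pipeline(client) as pipe:
        pipe.set("counter", 1)
        pipe.incr("counter")
    # __aexit__ called pipe.execute(), sending both commands together.
    print(await client.get("counter"))  # b'2'
    await client.aclose()


asyncio.run(main())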