diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 283e2265..f7cb3d67 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -35,6 +35,10 @@ jobs: runs-on: ${{ matrix.os }} timeout-minutes: 60 steps: + - name: Install sbt + if: contains(runner.os, 'macos') + run: brew install sbt + - name: Checkout current branch (full) uses: actions/checkout@v4 with: @@ -104,6 +108,10 @@ jobs: java: [temurin@17] runs-on: ${{ matrix.os }} steps: + - name: Install sbt + if: contains(runner.os, 'macos') + run: brew install sbt + - name: Checkout current branch (full) uses: actions/checkout@v4 with: @@ -168,13 +176,17 @@ jobs: dependency-submission: name: Submit Dependencies - if: github.event_name != 'pull_request' + if: github.event.repository.fork == false && github.event_name != 'pull_request' strategy: matrix: os: [ubuntu-latest] java: [temurin@17] runs-on: ${{ matrix.os }} steps: + - name: Install sbt + if: contains(runner.os, 'macos') + run: brew install sbt + - name: Checkout current branch (full) uses: actions/checkout@v4 with: diff --git a/.scalafmt.conf b/.scalafmt.conf index 5c03b0ce..50b75f1d 100644 --- a/.scalafmt.conf +++ b/.scalafmt.conf @@ -1,4 +1,4 @@ -version = 3.8.0 +version = 3.8.3 runner.dialect = scala213 style = default diff --git a/build.sbt b/build.sbt index 396cf848..bffe0e89 100644 --- a/build.sbt +++ b/build.sbt @@ -1,9 +1,10 @@ import com.typesafe.tools.mima.core._ -val Scala213 = "2.13.13" +val Scala213 = "2.13.15" inThisBuild( Seq( + startYear := Some(2020), Test / fork := true, developers := List( // your GitHub handle and name @@ -11,20 +12,21 @@ inThisBuild( ), licenses := Seq(License.Apache2), tlBaseVersion := "1.0", - tlSonatypeUseLegacyHost := false, - crossScalaVersions := Seq(Scala213, "3.3.3"), + sonatypeCredentialHost := xerial.sbt.Sonatype.sonatypeLegacy, + crossScalaVersions := Seq(Scala213, "3.3.4"), ThisBuild / scalaVersion := Scala213, githubWorkflowJavaVersions := Seq(JavaSpec.temurin("17")) ) ) -val http4sVersion = "1.0.0-M41" +val http4sVersion = "1.0.0-M42" -val jetty = "12.0.7" +val jetty = "12.0.14" -val netty = "4.1.107.Final" +val netty = "4.1.114.Final" -val munit = "0.7.29" +val munit = "1.0.2" +val munitScalaCheck = "1.0.0" val io_uring = "0.0.25.Final" @@ -47,7 +49,7 @@ lazy val core = project name := "http4s-netty-core", libraryDependencies ++= List( "org.log4s" %% "log4s" % "1.10.0", - "co.fs2" %% "fs2-reactive-streams" % "3.9.4", + "co.fs2" %% "fs2-reactive-streams" % "3.11.0", ("org.playframework.netty" % "netty-reactive-streams-http" % "3.0.2") .exclude("io.netty", "netty-codec-http") .exclude("io.netty", "netty-handler"), @@ -72,11 +74,12 @@ lazy val server = project "org.http4s" %% "http4s-dsl" % http4sVersion % Test, "ch.qos.logback" % "logback-classic" % "1.4.5" % Test, "org.scalameta" %% "munit" % munit % Test, - "org.scalameta" %% "munit-scalacheck" % munit % Test, + "org.scalameta" %% "munit-scalacheck" % munitScalaCheck % Test, "org.http4s" %% "http4s-circe" % http4sVersion % Test, "org.http4s" %% "http4s-jdk-http-client" % "1.0.0-M9" % Test, - "org.typelevel" %% "munit-cats-effect" % "2.0.0-M4" % Test + "org.typelevel" %% "munit-cats-effect" % "2.0.0" % Test ), + libraryDependencySchemes += "org.typelevel" %% "munit-cats-effect" % VersionScheme.Always, // "early-semver", libraryDependencies ++= nativeNettyModules, mimaBinaryIssueFilters := Nil ) @@ -97,11 +100,12 @@ lazy val client = project ("com.github.monkeywie" % "proxyee" % "1.7.6" % Test) .excludeAll("io.netty") .excludeAll("org.bouncycastle"), - "com.github.bbottema" % "java-socks-proxy-server" % "3.0.0" % Test, + "com.github.bbottema" % "java-socks-proxy-server" % "4.1.1" % Test, "org.scalameta" %% "munit" % munit % Test, "ch.qos.logback" % "logback-classic" % "1.2.13" % Test, - "org.typelevel" %% "munit-cats-effect" % "2.0.0-M4" % Test + "org.typelevel" %% "munit-cats-effect" % "2.0.0" % Test ), + libraryDependencySchemes += "org.typelevel" %% "munit-cats-effect" % VersionScheme.Always, // "early-semver", libraryDependencies ++= nativeNettyModules ) diff --git a/client/src/main/scala/org/http4s/netty/client/Http4sChannelPoolMap.scala b/client/src/main/scala/org/http4s/netty/client/Http4sChannelPoolMap.scala index d1f1f929..331f3577 100644 --- a/client/src/main/scala/org/http4s/netty/client/Http4sChannelPoolMap.scala +++ b/client/src/main/scala/org/http4s/netty/client/Http4sChannelPoolMap.scala @@ -20,13 +20,14 @@ package client import cats.effect.Async import cats.effect.Resource import cats.effect.std.Dispatcher -import cats.syntax.all._ import fs2.io.net.tls.TLSParameters import io.netty.bootstrap.Bootstrap import io.netty.channel.Channel import io.netty.channel.ChannelFuture +import io.netty.channel.ChannelHandlerContext import io.netty.channel.ChannelInitializer import io.netty.channel.ChannelPipeline +import io.netty.channel.ChannelPromise import io.netty.channel.pool.AbstractChannelPoolHandler import io.netty.channel.pool.AbstractChannelPoolMap import io.netty.channel.pool.ChannelPoolHandler @@ -50,6 +51,7 @@ import org.http4s.client.RequestKey import org.playframework.netty.http.HttpStreamsClientHandler import java.net.ConnectException +import java.util.concurrent.TimeUnit import scala.concurrent.duration.Duration private[client] case class Key(requestKey: RequestKey, version: HttpVersion) @@ -75,15 +77,20 @@ private[client] class Http4sChannelPoolMap[F[_]]( private def endOfPipeline(pipeline: ChannelPipeline): Unit = void { logger.trace("building pipeline / end-of-pipeline") - pipeline.addLast("streaming-handler", new HttpStreamsClientHandler) + pipeline.addLast( + "streaming-handler", + new HttpStreamsClientHandler { + override def close(ctx: ChannelHandlerContext, future: ChannelPromise): Unit = void { + ctx.close(future) + } + }) - if (config.idleTimeout.isFinite && config.idleTimeout.length > 0) { - void( - pipeline - .addLast( - "timeout", - new IdleStateHandler(0, 0, config.idleTimeout.length, config.idleTimeout.unit))) - } + val idletimeout = if (config.idleTimeout.isFinite) config.idleTimeout.toMillis else 0L + val readTimeout = if (config.readTimeout.isFinite) config.readTimeout.toMillis else 0L + + pipeline.addLast( + "timeout", + new IdleStateHandler(readTimeout, 0, idletimeout, TimeUnit.MILLISECONDS)) } private def connectAndConfigure(key: Key): Resource[F, Channel] = { @@ -251,16 +258,9 @@ private[client] object Http4sChannelPoolMap { proxy: Option[Proxy], sslConfig: SSLContextOption, http2: Boolean, - defaultRequestHeaders: Headers + defaultRequestHeaders: Headers, + readTimeout: Duration ) - private[client] def fromFuture[F[_]: Async, A](future: => Future[A]): F[A] = - Async[F].async { callback => - val fut = future - void( - fut - .addListener((f: Future[A]) => - if (f.isSuccess) callback(Right(f.getNow)) else callback(Left(f.cause())))) - Async[F].delay(Some(Async[F].delay(fut.cancel(false)).void)) - } + private[client] def fromFuture[F[_]: Async, A](future: => Future[A]): F[A] = ??? } diff --git a/client/src/main/scala/org/http4s/netty/client/Http4sHandler.scala b/client/src/main/scala/org/http4s/netty/client/Http4sHandler.scala index 52f8cec8..549aa790 100644 --- a/client/src/main/scala/org/http4s/netty/client/Http4sHandler.scala +++ b/client/src/main/scala/org/http4s/netty/client/Http4sHandler.scala @@ -24,6 +24,7 @@ import cats.effect.std.Dispatcher import cats.syntax.all._ import io.netty.channel._ import io.netty.handler.codec.http.HttpResponse +import io.netty.handler.timeout.IdleState import io.netty.handler.timeout.IdleStateEvent import org.http4s._ import org.http4s.netty.client.Http4sHandler.logger @@ -38,10 +39,11 @@ import scala.util.Success private[netty] class Http4sHandler[F[_]](dispatcher: Dispatcher[F])(implicit F: Async[F]) extends ChannelInboundHandlerAdapter { + type Promise = Either[Throwable, Resource[F, Response[F]]] => Unit private val modelConversion = new NettyModelConversion[F] private val promises = - collection.mutable.Queue[Either[Throwable, Resource[F, Response[F]]] => Unit]() + collection.mutable.Queue[Promise]() // By using the Netty event loop assigned to this channel we get two benefits: // 1. We can avoid the necessary hopping around of threads since Netty pipelines will // only pass events up and down from within the event loop to which it is assigned. @@ -51,6 +53,7 @@ private[netty] class Http4sHandler[F[_]](dispatcher: Dispatcher[F])(implicit F: private var eventLoopContext: ExecutionContext = _ private var pending: Future[Unit] = Future.unit + private var inFlight: Option[Promise] = None private def write2(request: Request[F], channel: Channel, key: Key): F[Unit] = { import io.netty.handler.codec.http2._ @@ -145,6 +148,8 @@ private[netty] class Http4sHandler[F[_]](dispatcher: Dispatcher[F])(implicit F: override def isSharable: Boolean = false override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = void { + implicit val ec: ExecutionContext = eventLoopContext + msg match { case h: HttpResponse => val responseResourceF = modelConversion @@ -154,17 +159,25 @@ private[netty] class Http4sHandler[F[_]](dispatcher: Dispatcher[F])(implicit F: } val result = dispatcher.unsafeToFuture(responseResourceF) - pending = pending.flatMap { _ => + if (promises.nonEmpty) { val promise = promises.dequeue() - result.transform { - case Failure(exception) => - promise(Left(exception)) - Failure(exception) - case Success(res) => - promise(Right(res)) - Success(()) - }(eventLoopContext) - }(eventLoopContext) + inFlight = Some(promise) + logger.trace("dequeuing promise") + pending = pending.flatMap { _ => + result.transform { + case Failure(exception) => + logger.trace("handling promise failure") + promise(Left(exception)) + inFlight = None + Failure(exception) + case Success(res) => + logger.trace("handling promise success") + promise(Right(res)) + inFlight = None + Success(()) + } + } + } case _ => super.channelRead(ctx, msg) } @@ -193,17 +206,31 @@ private[netty] class Http4sHandler[F[_]](dispatcher: Dispatcher[F])(implicit F: } private def onException(channel: Channel, e: Throwable): Unit = void { - promises.foreach(cb => cb(Left(e))) - promises.clear() + implicit val ec: ExecutionContext = eventLoopContext + + val allPromises = + (inFlight.toList ++ promises.dequeueAll(_ => true)).map(promise => Future(promise(Left(e)))) + logger.trace(s"onException: dequeueAll(${allPromises.size})") + pending = pending.flatMap(_ => Future.sequence(allPromises).map(_ => ())) + inFlight = None + channel.close() } override def userEventTriggered(ctx: ChannelHandlerContext, evt: scala.Any): Unit = void { evt match { - case _: IdleStateEvent if ctx.channel().isOpen => - val message = s"Closing connection due to idle timeout" - logger.trace(message) - onException(ctx.channel(), new TimeoutException(message)) + case e: IdleStateEvent if ctx.channel().isOpen => + val state = e.state() + state match { + case IdleState.READER_IDLE => + val message = "Timing out request due to missing read" + onException(ctx.channel(), new TimeoutException(message)) + case IdleState.WRITER_IDLE => () + case IdleState.ALL_IDLE => + val message = "Closing connection due to idle timeout" + logger.trace(message) + onException(ctx.channel(), new TimeoutException(message)) + } case _ => super.userEventTriggered(ctx, evt) } } diff --git a/client/src/main/scala/org/http4s/netty/client/NettyClientBuilder.scala b/client/src/main/scala/org/http4s/netty/client/NettyClientBuilder.scala index 2c1fad0e..62326d7a 100644 --- a/client/src/main/scala/org/http4s/netty/client/NettyClientBuilder.scala +++ b/client/src/main/scala/org/http4s/netty/client/NettyClientBuilder.scala @@ -29,6 +29,7 @@ import scala.concurrent.duration._ class NettyClientBuilder[F[_]]( idleTimeout: Duration, + readTimeout: Duration, eventLoopThreads: Int, maxInitialLength: Int, maxHeaderSize: Int, @@ -45,6 +46,7 @@ class NettyClientBuilder[F[_]]( private def copy( idleTimeout: Duration = idleTimeout, + readTimeout: Duration = readTimeout, eventLoopThreads: Int = eventLoopThreads, maxInitialLength: Int = maxInitialLength, maxHeaderSize: Int = maxHeaderSize, @@ -59,6 +61,7 @@ class NettyClientBuilder[F[_]]( ): NettyClientBuilder[F] = new NettyClientBuilder[F]( idleTimeout, + readTimeout, eventLoopThreads, maxInitialLength, maxHeaderSize, @@ -79,7 +82,9 @@ class NettyClientBuilder[F[_]]( def withMaxHeaderSize(size: Int): Self = copy(maxHeaderSize = size) def withMaxChunkSize(size: Int): Self = copy(maxChunkSize = size) def withMaxConnectionsPerKey(size: Int): Self = copy(maxConnectionsPerKey = size) - def withIdleTimeout(duration: FiniteDuration): Self = copy(idleTimeout = duration) + + def withIdleTimeout(duration: Duration): Self = copy(idleTimeout = duration) + def withReadTimeout(duration: Duration): Self = copy(readTimeout = duration) def withSSLContext(sslContext: SSLContext): Self = copy(sslContext = SSLContextOption.Provided(sslContext)) @@ -134,7 +139,8 @@ class NettyClientBuilder[F[_]]( proxy, sslContext, http2, - defaultRequestHeaders + defaultRequestHeaders, + readTimeout ) Client[F](new Http4sChannelPoolMap[F](bs, config).run) } @@ -144,6 +150,7 @@ object NettyClientBuilder { def apply[F[_]](implicit F: Async[F]): NettyClientBuilder[F] = new NettyClientBuilder[F]( idleTimeout = 60.seconds, + readTimeout = 60.seconds, eventLoopThreads = 0, maxInitialLength = 4096, maxHeaderSize = 8192, diff --git a/client/src/test/scala/org/http4s/netty/client/HttpProxyTest.scala b/client/src/test/scala/org/http4s/netty/client/HttpProxyTest.scala index 6952e1e9..103cfd78 100644 --- a/client/src/test/scala/org/http4s/netty/client/HttpProxyTest.scala +++ b/client/src/test/scala/org/http4s/netty/client/HttpProxyTest.scala @@ -29,7 +29,6 @@ import org.http4s.Uri import org.http4s.client.testkit.scaffold.ServerScaffold import java.net.ServerSocket -import scala.compat.java8.FutureConverters._ class HttpProxyTest extends IOSuite { @@ -44,12 +43,12 @@ class HttpProxyTest extends IOSuite { val proxy: IOFixture[HttpProxy] = resourceFixture( for { address <- Resource.eval(HttpProxyTest.randomSocketAddress[IO]) - _ <- Resource { + _ <- Resource.make[IO, HttpProxyServer] { val s = new HttpProxyServer() - IO.fromFuture( - IO(toScala(s.startAsync(address.host.toInetAddress.getHostAddress, address.port.value)))) - .as(s -> IO.blocking(s.close())) - } + IO.fromCompletionStage( + IO(s.startAsync(address.host.toInetAddress.getHostAddress, address.port.value))) + .as(s) + }(s => IO.blocking(s.close())) } yield HttpProxy( Uri.Scheme.http, address.host, diff --git a/client/src/test/scala/org/http4s/netty/client/NettyClientIdleTimeoutTest.scala b/client/src/test/scala/org/http4s/netty/client/NettyClientIdleTimeoutTest.scala index 28891d45..6f672f99 100644 --- a/client/src/test/scala/org/http4s/netty/client/NettyClientIdleTimeoutTest.scala +++ b/client/src/test/scala/org/http4s/netty/client/NettyClientIdleTimeoutTest.scala @@ -17,6 +17,7 @@ package org.http4s.netty.client import cats.effect.IO +import cats.syntax.all._ import com.comcast.ip4s._ import munit.catseffect.IOFixture import org.http4s.HttpRoutes @@ -28,26 +29,44 @@ import org.http4s.ember.server.EmberServerBuilder import org.http4s.implicits._ import org.http4s.server.Server +import scala.concurrent.TimeoutException import scala.concurrent.duration._ class NettyClientIdleTimeoutTest extends IOSuite { - override val munitIOTimeout: Duration = 1.minute + private val logger = org.log4s.getLogger - val nettyClient: IOFixture[Client[IO]] = + val nettyIdleClient: IOFixture[Client[IO]] = resourceFixture( NettyClientBuilder[IO] - .withIdleTimeout(2.seconds) + .withIdleTimeout(3.seconds) .resource, "netty client") + val nettyReadTimeoutClient: IOFixture[Client[IO]] = + resourceFixture( + NettyClientBuilder[IO] + .withReadTimeout(3.seconds) + .resource, + "netty client") + + def respond(path: String, sleep: FiniteDuration, value: String): IO[Response[IO]] = + IO(logger.trace(s"server: received /${path} request, sleeping...")) >> + IO.sleep(sleep) >> + IO(logger.trace(s"server: responding with '$value'")) >> Ok(value) + val server: IOFixture[Server] = resourceFixture( EmberServerBuilder .default[IO] .withPort(port"0") .withHttpApp( HttpRoutes - .of[IO] { case GET -> Root / "idle-timeout" => - IO.sleep(30.seconds).as(Response()) + .of[IO] { + case GET -> Root / "idle-timeout" => + respond("idle-timeout", 4.seconds, "Wat") + case GET -> Root / "1" => + respond("1", 5.seconds, "1") + case GET -> Root / "2" => + respond("2", 1.seconds, "2") } .orNotFound ) @@ -55,19 +74,47 @@ class NettyClientIdleTimeoutTest extends IOSuite { "server" ) - List( - (nettyClient, "netty client") - ).foreach { case (client, name) => - test(s"$name fails after idle timeout") { - val s = server() + test("fails after idle timeout") { + val s = server() + + val req = Request[IO](uri = s.baseUri / "idle-timeout") + val response = nettyIdleClient().status(req).attempt + IO.race(response, IO.sleep(5.seconds)).map { + case Left(Left(error: TimeoutException)) => + assertEquals(error.getMessage, "Closing connection due to idle timeout") + case Left(Left(error)) => fail(s"Failed with $error") + case Left(Right(_)) => fail("response available") + case Right(_) => fail("idle timeout wasn't triggered") + } + } + + test("Request A timed out, idle timeout kills connection") { + val s = server() + val c = nettyIdleClient() + + val req1 = Request[IO](uri = s.baseUri / "1") + val req2 = Request[IO](uri = s.baseUri / "2") + for { + error <- c.expect[String](req1).attempt.map(_.leftMap(_.getMessage)) + r2 <- c.expect[String](req2).attempt.map(_.leftMap(_.getMessage)) + } yield { + assertEquals(error, Left("Closing connection due to idle timeout")) + assertEquals(r2, Right("2")) + } + } + + test("Request A timed out, request B receives response B") { + val s = server() + val c = nettyReadTimeoutClient() - val req = Request[IO](uri = s.baseUri / "idle-timeout") - val response = client().run(req).allocated.attempt - IO.race(response, IO.sleep(5.seconds)).map { - case Left(Left(error)) => println(s"response failed, error:"); error.printStackTrace() - case Left(Right(_)) => println("response available") - case Right(_) => fail("idle timeout wasn't triggered") - } + val req1 = Request[IO](uri = s.baseUri / "1") + val req2 = Request[IO](uri = s.baseUri / "2") + for { + error <- c.expect[String](req1).attempt.map(_.leftMap(_.getMessage)) + r2 <- c.expect[String](req2).attempt.map(_.leftMap(_.getMessage)) + } yield { + assertEquals(error, Left("Timing out request due to missing read")) + assertEquals(r2, Right("2")) } } } diff --git a/client/src/test/scala/org/http4s/netty/client/SocksProxyTest.scala b/client/src/test/scala/org/http4s/netty/client/SocksProxyTest.scala index 0f3da954..56df4623 100644 --- a/client/src/test/scala/org/http4s/netty/client/SocksProxyTest.scala +++ b/client/src/test/scala/org/http4s/netty/client/SocksProxyTest.scala @@ -19,7 +19,7 @@ package org.http4s.netty.client import cats.effect.IO import cats.effect.Resource import munit.catseffect.IOFixture -import org.bbottema.javasocksproxyserver.SocksServer +import org.bbottema.javasocksproxyserver.SyncSocksServer import org.http4s.HttpRoutes import org.http4s.Response import org.http4s.Uri @@ -37,11 +37,10 @@ class SocksProxyTest extends IOSuite { val socks: IOFixture[(Socks4, Socks5)] = resourceFixture( for { address <- Resource.eval(HttpProxyTest.randomSocketAddress[IO]) - _ <- Resource { - val s = new SocksServer() - IO.blocking(s.start(address.port.value)) - .map(_ => s -> IO.blocking(s.stop())) - } + _ <- Resource.make[IO, SyncSocksServer] { + val s = new SyncSocksServer() + IO.blocking(s.start(address.port.value)).as(s) + }(s => IO.blocking(s.stop()).void) } yield Socks4(address.host, address.port, None) -> Socks5( address.host, address.port, diff --git a/project/build.properties b/project/build.properties index 04267b14..0b699c30 100644 --- a/project/build.properties +++ b/project/build.properties @@ -1 +1 @@ -sbt.version=1.9.9 +sbt.version=1.10.2 diff --git a/project/plugins.sbt b/project/plugins.sbt index 5799012f..f0c1820b 100644 --- a/project/plugins.sbt +++ b/project/plugins.sbt @@ -1 +1 @@ -addSbtPlugin("org.http4s" % "sbt-http4s-org" % "0.16.3") +addSbtPlugin("org.http4s" % "sbt-http4s-org" % "0.17.3") diff --git a/server/src/main/scala/org/http4s/netty/server/NettyServerBuilder.scala b/server/src/main/scala/org/http4s/netty/server/NettyServerBuilder.scala index cbbbc53f..97a9961e 100644 --- a/server/src/main/scala/org/http4s/netty/server/NettyServerBuilder.scala +++ b/server/src/main/scala/org/http4s/netty/server/NettyServerBuilder.scala @@ -197,6 +197,8 @@ final class NettyServerBuilder[F[_]] private ( def withNettyChannelOptions(opts: NettyChannelOptions): Self = copy(nettyChannelOptions = opts) + def withWebSocketMaxFrameLength(wsMaxFrameLength: Int): Self = + copy(wsMaxFrameLength = wsMaxFrameLength) def withWebSocketCompression: Self = copy(wsCompression = true) def withoutWebSocketCompression: Self = copy(wsCompression = false) diff --git a/server/src/test/scala/org/http4s/netty/server/WebsocketTest.scala b/server/src/test/scala/org/http4s/netty/server/WebsocketTest.scala index 300890ea..71b0957f 100644 --- a/server/src/test/scala/org/http4s/netty/server/WebsocketTest.scala +++ b/server/src/test/scala/org/http4s/netty/server/WebsocketTest.scala @@ -106,7 +106,7 @@ object WebsocketTest { class NettyWebsocketTest extends WebsocketTest( NettyWSClientBuilder[IO].withIdleTimeout(5.seconds).withNioTransport.resource) { - test("send and receive frames in low-level mode".flaky) { + test("send and receive frames in low-level mode") { testLowLevel } } @@ -117,7 +117,7 @@ class JDKClientWebsocketTest testLowLevel } - test("group frames by their `last` attribute in high-level mode") { + test("group frames by their `last` attribute in high-level mode".flaky) { val uri = server() client() .connectHighLevel(WSRequest(uri))