@@ -23,6 +23,8 @@
|
23 | 23 | import java.io.IOException; |
24 | 24 | import java.util.concurrent.TimeUnit; |
25 | 25 | import java.util.logging.Logger; |
| 26 | +import java.util.concurrent.ExecutorService; |
| 27 | +import java.util.concurrent.Executors; |
26 | 28 | |
27 | 29 | /** |
28 | 30 | * Server that manages startup/shutdown of a {@code Greeter} server. |
@@ -31,11 +33,20 @@ public class HelloWorldServer {
|
31 | 33 | private static final Logger logger = Logger.getLogger(HelloWorldServer.class.getName()); |
32 | 34 | |
33 | 35 | private Server server; |
34 | | - |
35 | 36 | private void start() throws IOException { |
36 | 37 | /* The port on which the server should run */ |
37 | 38 | int port = 50051; |
| 39 | +/* |
| 40 | + * By default gRPC uses a global, shared Executors.newCachedThreadPool() for gRPC callbacks into |
| 41 | + * your application. This is convenient, but can cause an excessive number of threads to be |
| 42 | + * created if there are many RPCs. It is often better to limit the number of threads your |
| 43 | + * application uses for processing and let RPCs queue when the CPU is saturated. |
| 44 | + * The appropriate number of threads varies heavily between applications. |
| 45 | + * Async application code generally does not need more threads than CPU cores. |
| 46 | + */ |
| 47 | +ExecutorService executor = Executors.newFixedThreadPool(2); |
38 | 48 | server = Grpc.newServerBuilderForPort(port, InsecureServerCredentials.create()) |
| 49 | + .executor(executor) |
39 | 50 | .addService(new GreeterImpl()) |
40 | 51 | .build() |
41 | 52 | .start(); |
@@ -48,7 +59,12 @@ public void run() {
|
48 | 59 | try { |
49 | 60 | HelloWorldServer.this.stop(); |
50 | 61 | } catch (InterruptedException e) { |
| 62 | +if (server != null) { |
| 63 | +  server.shutdownNow(); |
| 64 | +} |
51 | 65 | e.printStackTrace(System.err); |
| 66 | +} finally { |
| 67 | +  executor.shutdown(); |
52 | 68 | } |
53 | 69 | System.err.println("*** server shut down"); |
54 | 70 | } |
|