yeandy commented on code in PR #22795: URL: https://github.com/apache/beam/pull/22795#discussion_r950474838
########## sdks/python/apache_beam/ml/inference/pytorch_inference_test.py: ########## @@ -373,6 +373,40 @@ def test_invalid_input_type(self): # pylint: disable=expression-not-assigned pcoll | RunInference(model_handler) + def test_gpu_convert_to_cpu(self): + with self.assertLogs() as log: + with TestPipeline() as pipeline: + examples = torch.from_numpy( + np.array([1, 5, 3, 10], dtype="float32").reshape(-1, 1)) + + state_dict = OrderedDict([('linear.weight', torch.Tensor([[2.0]])), + ('linear.bias', torch.Tensor([0.5]))]) + path = os.path.join(self.tmpdir, 'my_state_dict_path') + torch.save(state_dict, path) + + model_handler = PytorchModelHandlerTensor( + state_dict_path=path, + model_class=PytorchLinearRegression, + model_params={ + 'input_dim': 1, 'output_dim': 1 + }, + device='GPU') + # Upon initialization, device is cuda + self.assertEqual(model_handler._device, torch.device('cuda')) + + pcoll = pipeline | 'start' >> beam.Create(examples) + # pylint: disable=expression-not-assigned + pcoll | RunInference(model_handler) + + # During model loading, device converted to cuda + self.assertEqual(model_handler._device, torch.device('cuda')) + + self.assertIn("INFO:root:Device is set to CUDA", log.output) + self.assertIn( Review Comment: I check for the change [here](https://github.com/apache/beam/pull/22795/files/73c32fd2ec5ef5d5586bf5034c3f0c54323b8b1c#diff-dbfeab547e43f6c55fab43276a2d6fed8222381313e1af9030859ca1c235127cR394-R402). Is this good enough? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: github-unsubscribe@beam.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org
########## sdks/python/apache_beam/ml/inference/pytorch_inference_test.py: ########## @@ -373,6 +373,40 @@ def test_invalid_input_type(self): # pylint: disable=expression-not-assigned pcoll | RunInference(model_handler) + def test_gpu_convert_to_cpu(self): + with self.assertLogs() as log: + with TestPipeline() as pipeline: + examples = torch.from_numpy( + np.array([1, 5, 3, 10], dtype="float32").reshape(-1, 1)) + + state_dict = OrderedDict([('linear.weight', torch.Tensor([[2.0]])), + ('linear.bias', torch.Tensor([0.5]))]) + path = os.path.join(self.tmpdir, 'my_state_dict_path') + torch.save(state_dict, path) + + model_handler = PytorchModelHandlerTensor( + state_dict_path=path, + model_class=PytorchLinearRegression, + model_params={ + 'input_dim': 1, 'output_dim': 1 + }, + device='GPU') + # Upon initialization, device is cuda + self.assertEqual(model_handler._device, torch.device('cuda')) + + pcoll = pipeline | 'start' >> beam.Create(examples) + # pylint: disable=expression-not-assigned + pcoll | RunInference(model_handler) + + # During model loading, device converted to cuda + self.assertEqual(model_handler._device, torch.device('cuda')) + + self.assertIn("INFO:root:Device is set to CUDA", log.output) + self.assertIn( Review Comment: I check for the change [here](https://github.com/apache/beam/pull/22795/files/73c32fd2ec5ef5d5586bf5034c3f0c54323b8b1c#diff-dbfeab547e43f6c55fab43276a2d6fed8222381313e1af9030859ca1c235127cR394-R402). Is this good enough? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: github-unsubscr...@beam.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org