[Dy2St][CUDAGraph] Set undefined place for CUDAGraph OP outputs before lowering to avoid unnecessary `memcpy` && Add CUDAGraph unitest by DrRyanHuang · Pull Request #75078 · PaddlePaddle/Paddle

    (%55, %56, %57, %58, %59, %60) = "pd_op.cuda_graph" [id:3213] () {} : () -> gpu_tensor<-1x1024xbf16>, gpu_tensor<-1x2560xbf16>, gpu_tensor<1xi64>, gpu_tensor<1xi64>, gpu_tensor<i64>, gpu_tensor<-1x2048xbf16>
    {
		......
        (%65) = "embedding(phi_kernel)" (%0, %64) {kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"embedding",op_name:"pd_op.embedding",origin_id:3218,padding_idx:-1,sparse:false,stop_gradient:[false]} : (gpu_tensor<-1xi64>, gpu_tensor<103424x1024xbf16>) -> gpu_tensor<-1x1024xbf16>
        (%66, %67, %68) = "rms_norm(phi_kernel)" (%65, <<NULL VALUE>>, <<NULL VALUE>>, %63, <<NULL VALUE>>) {begin_norm_axis:1,epsilon:1e-05,kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"rms_norm",op_name:"pd_op.rms_norm",origin_id:3219,quant_max_bound:0,quant_min_bound:0,quant_round_type:0,quant_scale:-1,stop_gradient:[false,false,false]} : (gpu_tensor<-1x1024xbf16>, <<NULL TYPE>>, <<NULL TYPE>>, gpu_tensor<1024xbf16>, <<NULL TYPE>>) -> gpu_tensor<-1x1024xbf16>, <<NULL TYPE>>, gpu_tensor<-1xf32>
        (%69) = "weight_only_linear(phi_kernel)" (%66, %62, <<NULL VALUE>>, %61) {arch:90,group_size:-1,kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"weight_only_linear",op_name:"pd_op.weight_only_linear",origin_id:3220,stop_gradient:[false],weight_dtype:"int8"} : (gpu_tensor<-1x1024xbf16>, gpu_tensor<2560x1024xi8>, <<NULL TYPE>>, gpu_tensor<2560xbf16>) -> gpu_tensor<-1x2560xbf16>
        (%70) = "shape64(phi_kernel)" (%69) {kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"shape64",op_name:"pd_op.shape64",origin_id:3221,stop_gradient:[true]} : (gpu_tensor<-1x2560xbf16>) -> cpu_tensor<2xi64>
        (%71) = "full_int_array(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full_int_array",op_name:"pd_op.full_int_array",origin_id:3222,place:Place(cpu),stop_gradient:[true],value:[0]} : () -> cpu_tensor<1xi64>
        (%72) = "full_int_array(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full_int_array",op_name:"pd_op.full_int_array",origin_id:3223,place:Place(cpu),stop_gradient:[true],value:[1]} : () -> cpu_tensor<1xi64>
        (%73) = "slice(phi_kernel)" (%70, %71, %72) {axes:[0],decrease_axis:[0],infer_flags:[1],kernel_key:<backend:CPU|layout:NCHW|dtype:int64>,kernel_name:"slice",op_name:"pd_op.slice",origin_id:3224,stop_gradient:[true]} : (cpu_tensor<2xi64>, cpu_tensor<1xi64>, cpu_tensor<1xi64>) -> cpu_tensor<i64>
        (%74) = "full(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full",op_name:"pd_op.full",origin_id:3225,place:Place(cpu),shape:[],stop_gradient:[true],value:2048} : () -> cpu_tensor<i64>
		......
        (%78) = "memcpy_h2d(phi_kernel)" (%71) {dst_place_type:1,kernel_key:<backend:GPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"memcpy_h2d",op_name:"pd_op.memcpy_h2d",origin_id:3229} : (cpu_tensor<1xi64>) -> gpu_tensor<1xi64>
        (%79) = "memcpy_h2d(phi_kernel)" (%72) {dst_place_type:1,kernel_key:<backend:GPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"memcpy_h2d",op_name:"pd_op.memcpy_h2d",origin_id:3230} : (cpu_tensor<1xi64>) -> gpu_tensor<1xi64>
        (%80) = "memcpy_h2d(phi_kernel)" (%74) {dst_place_type:1,kernel_key:<backend:GPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"memcpy_h2d",op_name:"pd_op.memcpy_h2d",origin_id:3231} : (cpu_tensor<i64>) -> gpu_tensor<i64>
        () = "cf.yield" [id:3232] (%65, %69, %78, %79, %80, %77) {origin_id:3120} : (gpu_tensor<-1x1024xbf16>, gpu_tensor<-1x2560xbf16>, gpu_tensor<1xi64>, gpu_tensor<1xi64>, gpu_tensor<i64>, gpu_tensor<-1x2048xbf16>) -> 
    }

原因是我们在外部把输出的 place 指定为 GPUPlace,所以即使最后的 yield OP 的输出带有 cpu_tensor,lower 过程中插入的 memcpy_h2d OP 也会把它搬运到 GPU 上

    (%55, %56, %57, %58, %59, %60) = "pd_op.cuda_graph" [id:6315] () {} : () -> gpu_tensor<-1x1024xbf16>, gpu_tensor<-1x2560xbf16>, cpu_tensor<1xi64>, cpu_tensor<1xi64>, cpu_tensor<i64>, gpu_tensor<-1x2048xbf16>
    {
		......
        (%65) = "embedding(phi_kernel)" (%0, %64) {kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"embedding",op_name:"pd_op.embedding",origin_id:6320,padding_idx:-1,sparse:false,stop_gradient:[false]} : (gpu_tensor<-1xi64>, gpu_tensor<103424x1024xbf16>) -> gpu_tensor<-1x1024xbf16>
        (%66, %67, %68) = "rms_norm(phi_kernel)" (%65, <<NULL VALUE>>, <<NULL VALUE>>, %63, <<NULL VALUE>>) {begin_norm_axis:1,epsilon:1e-05,kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"rms_norm",op_name:"pd_op.rms_norm",origin_id:6321,quant_max_bound:0,quant_min_bound:0,quant_round_type:0,quant_scale:-1,stop_gradient:[false,false,false]} : (gpu_tensor<-1x1024xbf16>, <<NULL TYPE>>, <<NULL TYPE>>, gpu_tensor<1024xbf16>, <<NULL TYPE>>) -> gpu_tensor<-1x1024xbf16>, <<NULL TYPE>>, gpu_tensor<-1xf32>
        (%69) = "weight_only_linear(phi_kernel)" (%66, %62, <<NULL VALUE>>, %61) {arch:90,group_size:-1,kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"weight_only_linear",op_name:"pd_op.weight_only_linear",origin_id:6322,stop_gradient:[false],weight_dtype:"int8"} : (gpu_tensor<-1x1024xbf16>, gpu_tensor<2560x1024xi8>, <<NULL TYPE>>, gpu_tensor<2560xbf16>) -> gpu_tensor<-1x2560xbf16>
        (%70) = "shape64(phi_kernel)" (%69) {kernel_key:<backend:GPU|layout:NCHW|dtype:bfloat16>,kernel_name:"shape64",op_name:"pd_op.shape64",origin_id:6323,stop_gradient:[true]} : (gpu_tensor<-1x2560xbf16>) -> cpu_tensor<2xi64>
        (%71) = "full_int_array(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full_int_array",op_name:"pd_op.full_int_array",origin_id:6324,place:Place(cpu),stop_gradient:[true],value:[0]} : () -> cpu_tensor<1xi64>
        (%72) = "full_int_array(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full_int_array",op_name:"pd_op.full_int_array",origin_id:6325,place:Place(cpu),stop_gradient:[true],value:[1]} : () -> cpu_tensor<1xi64>
        (%73) = "slice(phi_kernel)" (%70, %71, %72) {axes:[0],decrease_axis:[0],infer_flags:[1],kernel_key:<backend:CPU|layout:NCHW|dtype:int64>,kernel_name:"slice",op_name:"pd_op.slice",origin_id:6326,stop_gradient:[true]} : (cpu_tensor<2xi64>, cpu_tensor<1xi64>, cpu_tensor<1xi64>) -> cpu_tensor<i64>
        (%74) = "full(phi_kernel)" () {dtype:int64,kernel_key:<backend:CPU|layout:Undefined(AnyLayout)|dtype:int64>,kernel_name:"full",op_name:"pd_op.full",origin_id:6327,place:Place(cpu),shape:[],stop_gradient:[true],value:2048} : () -> cpu_tensor<i64>
        (%75) = "builtin.combine" [id:6328] (%73, %74) {origin_id:5988,stop_gradient:[true]} : (cpu_tensor<i64>, cpu_tensor<i64>) -> vec[cpu_tensor<i64>,cpu_tensor<i64>]
        (%76) = "stack(phi_kernel)" (%75) {axis:0,kernel_key:<backend:CPU|layout:NCHW|dtype:int64>,kernel_name:"stack",op_name:"pd_op.stack",origin_id:6329,stop_gradient:[true]} : (vec[cpu_tensor<i64>,cpu_tensor<i64>]) -> cpu_tensor<2xi64>
        (%77) = "empty(phi_kernel)" (%76) {dtype:bfloat16,kernel_key:<backend:GPU|layout:Undefined(AnyLayout)|dtype:bfloat16>,kernel_name:"empty",op_name:"pd_op.empty",origin_id:6330,place:Place(undefined:0),stop_gradient:[true]} : (cpu_tensor<2xi64>) -> gpu_tensor<-1x2048xbf16>
        () = "cf.yield" [id:6331] (%65, %69, %71, %72, %74, %77) {origin_id:6222} : (gpu_tensor<-1x1024xbf16>, gpu_tensor<-1x2560xbf16>, cpu_tensor<1xi64>, cpu_tensor<1xi64>, cpu_tensor<i64>, gpu_tensor<-1x2048xbf16>) -> 
    }