{"id":17,"date":"2024-10-25T14:49:44","date_gmt":"2024-10-25T06:49:44","guid":{"rendered":"http:\/\/39.105.111.94\/?p=17"},"modified":"2024-10-29T09:59:19","modified_gmt":"2024-10-29T01:59:19","slug":"synthetichuman%e6%95%b0%e6%8d%ae%e9%9b%86%e7%9a%84cam%e5%a4%84%e7%90%86","status":"publish","type":"post","link":"https:\/\/sodalee.top\/?p=17","title":{"rendered":"SyntheticHuman++\u6570\u636e\u96c6\u7684cam\u5904\u7406"},"content":{"rendered":"\n<p class=\"has-medium-font-size\">\u8fd9\u4e2a\u6570\u636e\u96c6\u7684cam\u53c2\u6570\u5904\u7406\u4f3c\u4e4e\u548c\u522b\u7684\u4e0d\u592a\u4e00\u6837<br>render_camera_view_from_panorama\u539f\u672c\u662fChatGPT\u751f\u6210\u7684\uff0c\u4f46\u662f\u5bf9theta\u548cphi\u7684\u5904\u7406\u8fdb\u884c\u4e86\u4fee\u6539\u4ee5\u7b26\u5408\u539f\u4ed3\u5e93\u89c6\u89c9\u6548\u679c<br>sample_envmap_image\u4e2d\u7684print\u53ef\u4ee5\u7528\u4e8e\u540e\u7eed\u5bf9envmap\u7684\u65cb\u8f6c\uff08\u8ba1\u7b97\u4e86theta\uff09<br>KRT\u4e0b\u65b9\u7684\u6ce8\u91ca\u884cR\u662fHDR\u56fe\u6b63\u4e2d\u548c\u6b63\u80cc\u65b9\u5411<\/p>\n\n\n\n<pre class=\"wp-block-code has-small-font-size\"><code>import imageio\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\nimport sys\nimport torch\nfrom torch.nn import functional as F\n\ndef ACESToneMapping(color, adapted_lum):\n    A = 2.51\n    B = 0.03\n    C = 2.43\n    D = 0.59\n    E = 0.14\n\n    color *= adapted_lum\n    return (color * (A * color + B)) \/ (color * (C * color + D) + E)\n\ndef render_camera_view_from_panorama(panorama, camera_directions):\n    \"\"\"\n    panorama: \u8f93\u5165\u5168\u666f\u56fe (Equirectangular HDR)\n    K: \u76f8\u673a\u7684\u5185\u53c2\u77e9\u9635 3x3\n    R: \u65cb\u8f6c\u77e9\u9635 3x3\n    T: \u5e73\u79fb\u5411\u91cf 3x1\n    img_width: \u6e32\u67d3\u7ed3\u679c\u7684\u5bbd\u5ea6\n    img_height: \u6e32\u67d3\u7ed3\u679c\u7684\u9ad8\u5ea6\n    \"\"\"\n    camera_directions = camera_directions.numpy()\n\n    # \u5c06\u4e16\u754c\u5750\u6807\u7cfb\u4e2d\u7684\u65b9\u5411\u5411\u91cf\u8f6c\u6362\u4e3a\u7403\u9762\u5750\u6807\uff08\u7ecf\u5ea6\u3001\u7eac\u5ea6\uff09\n    theta = np.arctan2(-camera_directions&#91;..., 1], camera_directions&#91;..., 0])\n    phi = np.arcsin(camera_directions&#91;..., 2])\n\n    # \u5c06\u7403\u9762\u5750\u6807\u6620\u5c04\u56de\u5168\u666f\u56fe\u7684\u50cf\u7d20\u5750\u6807\n    u = (theta \/ (2 * np.pi) + 0.5) * panorama.shape&#91;1]\n    v = (-phi \/ np.pi + 0.5) * panorama.shape&#91;0]\n\n    # \u91c7\u6837\u5168\u666f\u56fe\u7684\u989c\u8272\u503c\uff08\u4f7f\u7528\u53cc\u7ebf\u6027\u63d2\u503c\uff09\n    sampled_image = cv2.remap(panorama, u.astype(np.float32), v.astype(np.float32), cv2.INTER_CUBIC, borderMode=cv2.BORDER_WRAP)\n\n    return sampled_image\n\ndef sample_envmap_image(image: torch.Tensor, ray_d: torch.Tensor):\n    ray_d = ray_d.type(torch.float32)\n    sh = ray_d.shape\n    if image.ndim == 4:\n        image = image&#91;0]\n    h, w = ray_d.size()&#91;:2]\n    ind = h \/\/ 2 * w + w \/\/ 2\n    ray_d = ray_d.view(-1, 3)\n    # envmap: H, W, C\n    # viewdirs: N, 3\n\n    # https:\/\/github.com\/zju3dv\/InvRender\/blob\/45e6cdc5e3c9f092b5d10e2904bbf3302152bb2f\/code\/model\/sg_render.py\n    image = image.permute(2, 0, 1).unsqueeze(0)\n\n    theta = torch.arccos(ray_d&#91;:, 2]).reshape(-1) - 1e-6\n    phi = torch.atan2(ray_d&#91;:, 1], ray_d&#91;:, 0]).reshape(-1)  # 0 - pi\n\n    # normalize to &#91;-1, 1]\n    print(-torch.atan2(ray_d&#91;ind, 1], ray_d&#91;ind, 0]) \/ torch.pi, torch.arccos(ray_d&#91;ind, 2]) \/ torch.pi * 2 - 1)\n    query_y = (theta \/ torch.pi) * 2 - 1\n    query_x = - phi \/ torch.pi\n    grid = torch.stack((query_x, query_y)).permute(1, 0).unsqueeze(0).unsqueeze(0)\n\n    rgb = F.grid_sample(image, grid, align_corners=False, padding_mode='border')\n    rgb = rgb.squeeze().permute(1, 0)\n    return rgb.view(sh)\n\ndef normalize(x: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:\n    # channel last: normalization\n    return x \/ (x.norm(dim=-1, keepdim=True) + eps)\n\ndef get_rays(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor, subpixel=False):\n    '''\n    inputs:\n        H, W: height, width of the pixel world\n        K: 3x3 matrix of the camera, f for focal length, (cx cy) for center point\n        R: 3x3 ratation matrix of the camera\n        T: 3x1 matrix for displacement\n    outputs:\n        ray_o: ray start points\n        ray_d: ray direction vectors\n    '''\n    # calculate the camera origin\n    ray_o = -(R.mT @ T).ravel()\n    # calculate the world coodinates of pixels\n    i, j = torch.meshgrid(torch.arange(H, dtype=R.dtype, device=R.device),\n                          torch.arange(W, dtype=R.dtype, device=R.device),\n                          indexing='ij')\n    # 0->H, 0->W\n    xy1 = torch.stack(&#91;j, i, torch.ones_like(i)], dim=2)\n    if subpixel:\n        rand = torch.rand(H, W, 2, device=R.device, dtype=R.dtype) - 0.5\n        xy1&#91;:, :, :2] += rand\n    pixel_camera = xy1 @ torch.inverse(K).mT\n    pixel_world = (pixel_camera - T.ravel()) @ R\n    # calculate the ray direction\n    ray_o = ray_o&#91;None, None].expand(pixel_world.shape)\n    ray_d = normalize(pixel_world - ray_o)\n    return ray_o, ray_d\n\ndef get_(cams, index):\n    return cams&#91;'K']&#91;index], cams&#91;'R']&#91;index], cams&#91;'T']&#91;index]\n\n# os.chdir(os.path.dirname(os.path.abspath(__file__)))\nimg = cv2.imread('.\/datasets\/HDR\/warm_restaurant_4k.hdr', cv2.IMREAD_UNCHANGED)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nimg = ACESToneMapping(img, 1.0)\n\n# pes = GetPerspective(img, 100, 00, -90, 1024, 1024)\ncams = {}\ncams = np.load('.\/datasets\/synthetic_human_pp\/josh\/annots.npy', allow_pickle=True).ravel()&#91;0]\nK, R, T = get_(cams&#91;'cams'], 3)\n# R = np.array(&#91;&#91;0, -1, 0], &#91;0, 0, -1], &#91;1, 0, 0]], dtype=np.double) # to center\n# R = np.array(&#91;&#91;0, 1, 0], &#91;0, 0, -1], &#91;-1, 0, 0]], dtype=np.double) # to back\nray_o, ray_d = get_rays(1024, 1024, torch.from_numpy(K), torch.from_numpy(R), torch.from_numpy(T))\n# pes = render_camera_view_from_panorama(img, ray_d)\npes = sample_envmap_image(torch.from_numpy(img), ray_d)\n\n\nplt.imshow(img)\nplt.show()\nplt.imshow(pes)\nplt.show()<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>\u8fd9\u4e2a\u6570\u636e\u96c6\u7684cam\u53c2\u6570\u5904\u7406\u4f3c\u4e4e\u548c\u522b\u7684\u4e0d\u592a\u4e00\u6837render_camera_view_from_panorama\u539f\u672c\u662fChatGPT\u751f &#8230;<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"closed","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"emotion":"","emotion_color":"","title_style":"","license":"","footnotes":""},"categories":[2],"tags":[],"class_list":["post-17","post","type-post","status-publish","format-standard","hentry","category-2"],"_links":{"self":[{"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/posts\/17","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/sodalee.top\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=17"}],"version-history":[{"count":6,"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/posts\/17\/revisions"}],"predecessor-version":[{"id":25,"href":"https:\/\/sodalee.top\/index.php?rest_route=\/wp\/v2\/posts\/17\/revisions\/25"}],"wp:attachment":[{"href":"https:\/\/sodalee.top\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=17"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/sodalee.top\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=17"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/sodalee.top\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=17"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}