- import {
- a,
- a3 as a2,
- e as e2,
- f,
- i,
- n as n3,
- o,
- o2,
- o3
- } from "./chunk-FQZKJNXZ.js";
- import {
- n as n2
- } from "./chunk-K7B6OWCU.js";
- import {
- r as r2
- } from "./chunk-E3G7BRZB.js";
- import {
- n
- } from "./chunk-C7742RNZ.js";
- import {
- r
- } from "./chunk-YAEIHDJH.js";
- import {
- e
- } from "./chunk-YXWMMD76.js";
// node_modules/@arcgis/core/views/3d/webgl-engine/core/shaderModules/Float4PassUniform.js
// Per-pass vec4 shader uniform: `compute(passData, renderCtx)` is evaluated each
// pass and its 4-component result uploaded via setUniform4fv under `uniformName`.
var e3 = class extends i {
  constructor(uniformName, compute) {
    super(uniformName, "vec4", a.Pass, (program, passData, renderCtx) =>
      program.setUniform4fv(uniformName, compute(passData, renderCtx))
    );
  }
};
// node_modules/@arcgis/core/views/3d/webgl-engine/core/shaderLibrary/util/CameraSpace.glsl.js
// Shader include: registers the projInfo/zScale uniforms and emits the GLSL
// reconstructPosition() helper that maps a fragment coordinate + linear depth
// back to a camera-space position.
function n4(builder) {
  const fragment = builder.fragment;
  fragment.uniforms.add(new e3("projInfo", (_passData, renderCtx) => f2(renderCtx)));
  fragment.uniforms.add(new e2("zScale", (_passData, renderCtx) => i2(renderCtx)));
  // GLSL source — keep byte-identical.
  fragment.code.add(n3`vec3 reconstructPosition(vec2 fragCoord, float depth) {
return vec3((fragCoord * projInfo.xy + projInfo.zw) * (zScale.x * depth + zScale.y), depth);
}`);
}
// Computes the "projInfo" vec4 (scale in .xy, offset in .zw) used by the GLSL
// reconstructPosition() above. Branches on projectionMatrix[11]: zero means no
// perspective term (orthographic-style matrix) — TODO(review) confirm convention.
function f2(renderCtx) {
  const proj = renderCtx.camera.projectionMatrix;
  const fullW = renderCtx.camera.fullWidth;
  const fullH = renderCtx.camera.fullHeight;
  if (proj[11] === 0) {
    return r(
      m,
      2 / (fullW * proj[0]),
      2 / (fullH * proj[5]),
      (1 + proj[12]) / proj[0],
      (1 + proj[13]) / proj[5]
    );
  }
  return r(
    m,
    -2 / (fullW * proj[0]),
    -2 / (fullH * proj[5]),
    (1 - proj[8]) / proj[0],
    (1 - proj[9]) / proj[5]
  );
}
// Scratch destination reused by f2() to avoid a per-call allocation; written
// with 4 components via r(m, x, y, z, w) — presumably a vec4 from factory `n`.
var m = n();
// Selects the "zScale" (scale, bias) pair for depth handling in
// reconstructPosition(): (0, 1) when projectionMatrix[11] === 0, else (1, 0).
function i2(renderCtx) {
  const noPerspectiveTerm = renderCtx.camera.projectionMatrix[11] === 0;
  if (noPerspectiveTerm) {
    return r2(d, 0, 1);
  }
  return r2(d, 1, 0);
}
// Scratch destination reused by i2(); written with 2 components via
// r2(d, a, b) — presumably a vec2 from factory `n2`.
var d = n2();
// node_modules/@arcgis/core/chunks/SSAO.glsl.js
// Number of SSAO kernel samples per fragment; must stay in sync with the
// `vec3 sphere[16]` array and the tap-loop bound emitted in d2() below.
var m2 = 16;
// Base occlusion strength; folded into the "intensity" uniform as 4 * p / radius^6.
var p = 0.5;
// Builds the SSAO fragment shader program and returns the assembled builder
// (an o2 instance):
// - includes shared chunks `o` and `a2` (a2 presumably supplies
//   linearDepthFromTexture — confirm against chunk-FQZKJNXZ) plus the
//   camera-space reconstruction module n4 above,
// - registers every uniform the GLSL reads (radius, nearFar, normalMap,
//   depthMap, zScale, projScale, rotation-noise texture `rnm` + rnmScale,
//   intensity, screenSize),
// - emits the 16-tap sampling kernel, the radius-based fall-off, and a main()
//   that accumulates occlusion and anti-tone-maps the result into gl_FragColor.
function d2() {
const o4 = new o2(), d3 = o4.fragment;
// NOTE(review): single comma-expression return; the tagged template literals
// below are emitted GLSL source and must be kept byte-identical.
return o4.include(o), d3.include(a2), o4.include(n4), d3.uniforms.add(new o3("radius", (e4, r3) => v(r3))), d3.code.add(n3`vec3 sphere[16];
void fillSphere() {
sphere[0] = vec3(0.186937, 0.0, 0.0);
sphere[1] = vec3(0.700542, 0.0, 0.0);
sphere[2] = vec3(-0.864858, -0.481795, -0.111713);
sphere[3] = vec3(-0.624773, 0.102853, -0.730153);
sphere[4] = vec3(-0.387172, 0.260319, 0.007229);
sphere[5] = vec3(-0.222367, -0.642631, -0.707697);
sphere[6] = vec3(-0.01336, -0.014956, 0.169662);
sphere[7] = vec3(0.122575, 0.1544, -0.456944);
sphere[8] = vec3(-0.177141, 0.85997, -0.42346);
sphere[9] = vec3(-0.131631, 0.814545, 0.524355);
sphere[10] = vec3(-0.779469, 0.007991, 0.624833);
sphere[11] = vec3(0.308092, 0.209288,0.35969);
sphere[12] = vec3(0.359331, -0.184533, -0.377458);
sphere[13] = vec3(0.192633, -0.482999, -0.065284);
sphere[14] = vec3(0.233538, 0.293706, -0.055139);
sphere[15] = vec3(0.417709, -0.386701, 0.442449);
}
float fallOffFunction(float vv, float vn, float bias) {
float f = max(radius * radius - vv, 0.0);
return f * f * f * max(vn-bias, 0.0);
}`), d3.code.add(n3`float aoValueFromPositionsAndNormal(vec3 C, vec3 n_C, vec3 Q) {
vec3 v = Q - C;
float vv = dot(v, v);
float vn = dot(normalize(v), n_C);
return fallOffFunction(vv, vn, 0.1);
}`), d3.uniforms.add([new e2("nearFar", (e4, r3) => r3.camera.nearFar), new f("normalMap", (e4) => e4.normalTexture), new f("depthMap", (e4) => e4.depthTexture), new e2("zScale", (e4, r3) => i2(r3)), new o3("projScale", (e4) => e4.projScale), new f("rnm", (e4) => e4.noiseTexture), new e2("rnmScale", (o5, t) => r2(h, t.camera.fullWidth / e(o5.noiseTexture).descriptor.width, t.camera.fullHeight / e(o5.noiseTexture).descriptor.height)), new o3("intensity", (e4, r3) => 4 * p / v(r3) ** 6), new e2("screenSize", (e4, o5) => r2(h, o5.camera.fullWidth, o5.camera.fullHeight))]), d3.code.add(n3`
void main(void) {
fillSphere();
vec3 fres = normalize((texture2D(rnm, uv * rnmScale).xyz * 2.0) - vec3(1.0));
float currentPixelDepth = linearDepthFromTexture(depthMap, uv, nearFar);
if (-currentPixelDepth>nearFar.y || -currentPixelDepth<nearFar.x) {
gl_FragColor = vec4(0.0);
return;
}
vec3 currentPixelPos = reconstructPosition(gl_FragCoord.xy,currentPixelDepth);
// get the normal of current fragment
vec4 norm4 = texture2D(normalMap, uv);
vec3 norm = vec3(-1.0) + 2.0 * norm4.xyz;
bool isTerrain = norm4.w<0.5;
float sum = .0;
vec3 tapPixelPos;
// note: the factor 2.0 should not be necessary, but makes ssao much nicer.
// bug or deviation from CE somewhere else?
float ps = projScale / (2.0 * currentPixelPos.z * zScale.x + zScale.y);
for(int i = 0; i < ${n3.int(m2)}; ++i) {
vec2 unitOffset = reflect(sphere[i], fres).xy;
vec2 offset = vec2(-unitOffset * radius * ps);
//don't use current or very nearby samples
if ( abs(offset.x)<2.0 || abs(offset.y)<2.0) continue;
vec2 tc = vec2(gl_FragCoord.xy + offset);
if (tc.x < 0.0 || tc.y < 0.0 || tc.x > screenSize.x || tc.y > screenSize.y) continue;
vec2 tcTap = tc / screenSize;
float occluderFragmentDepth = linearDepthFromTexture(depthMap, tcTap, nearFar);
if (isTerrain) {
bool isTerrainTap = texture2D(normalMap, tcTap).w<0.5;
if (isTerrainTap) {
continue;
}
}
tapPixelPos = reconstructPosition(tc, occluderFragmentDepth);
sum+= aoValueFromPositionsAndNormal(currentPixelPos, norm, tapPixelPos);
}
// output the result
float A = max(1.0 - sum * intensity / float(${n3.int(m2)}),0.0);
// Anti-tone map to reduce contrast and drag dark region farther: (x^0.2 + 1.2 * x^4)/2.2
A = (pow(A, 0.2) + 1.2 * A*A*A*A) / 2.2;
gl_FragColor = vec4(A);
}
`), o4;
}
// Derives the SSAO sampling radius from the camera: 20x the rendered pixel
// size evaluated at 4x |relativeElevation|, clamped to a minimum of 10.
function v(passData) {
  const camera = passData.camera;
  const referenceDistance = Math.abs(4 * camera.relativeElevation);
  const pixelSize = camera.computeRenderPixelSizeAtDist(referenceDistance);
  return Math.max(10, 20 * pixelSize);
}
// Scratch destination shared by the rnmScale and screenSize uniform callbacks
// in d2(); written with 2 components via r2(h, a, b) — presumably a vec2.
var h = n2();
// Frozen, prototype-less namespace object exposing the SSAO builder as `build`,
// tagged "Module" (bundler ESM-interop shape); exported below as `x`.
var x = Object.freeze(Object.defineProperty({ __proto__: null, build: d2 }, Symbol.toStringTag, { value: "Module" }));
- export {
- e3 as e,
- d2 as d,
- x
- };
- //# sourceMappingURL=chunk-UUVY36LI.js.map