commit before updating gdx

This commit is contained in:
minjaesong
2023-10-05 14:57:03 +09:00
parent d81adc449b
commit 1088b1d29b
3 changed files with 9 additions and 5 deletions

View File

@@ -44,7 +44,7 @@ package=net.torvald.terrarum.modulebasegame
entrypoint=net.torvald.terrarum.modulebasegame.EntryPoint
# Release date in YYYY-MM-DD
releasedate=2023-09-08
releasedate=2023-10-06
# The version, must follow Semver 2.0.0 scheme (https://semver.org/)
version=0.3.3

View File

@@ -35,17 +35,19 @@ public class Float16FrameBuffer extends FrameBuffer {
public Float16FrameBuffer (int width, int height, boolean hasDepth) {
/*if (!App.gl40capable || App.operationSystem.equals("OSX")) { // disable float framebuffer for Apple M chips
FrameBufferBuilder bufferBuilder = new FrameBufferBuilder(width, height);
bufferBuilder.addColorTextureAttachment(GL20.GL_RGBA, GL20.GL_RGBA, GL20.GL_UNSIGNED_SHORT); // but 16bpp int works perfectly?!
bufferBuilder.addColorTextureAttachment(GL20.GL_RGBA, GL20.GL_RGBA, GL20.GL_UNSIGNED_SHORT); // but 16bpp creates slight banding
if (hasDepth) bufferBuilder.addBasicDepthRenderBuffer();
this.bufferBuilder = bufferBuilder;
}
else {
FloatFrameBufferBuilder bufferBuilder = new FloatFrameBufferBuilder(width, height);
bufferBuilder.addFloatAttachment(GL30.GL_RGBA16F, GL30.GL_RGBA, GL30.GL_FLOAT, false);
bufferBuilder.addFloatAttachment(GL30.GL_RGBA16F, GL30.GL_RGBA, GL30.GL_FLOAT, false); // float16 will not create a banding
if (hasDepth) bufferBuilder.addBasicDepthRenderBuffer();
this.bufferBuilder = bufferBuilder;
}*/
// FIXME temporarily disabling Float16 -- has LWJGL bug that prevents setOpenGLEmulation on Windows (or is this Nvidia's issue?)
FrameBufferBuilder bufferBuilder = new FrameBufferBuilder(width, height);
bufferBuilder.addColorTextureAttachment(GL20.GL_RGBA, GL20.GL_RGBA, GL20.GL_UNSIGNED_SHORT); // but 16bpp int works perfectly?!
if (hasDepth) bufferBuilder.addBasicDepthRenderBuffer();

View File

@@ -12,6 +12,7 @@ import com.badlogic.gdx.graphics.glutils.*;
import com.badlogic.gdx.utils.Disposable;
import com.badlogic.gdx.utils.GdxRuntimeException;
import com.badlogic.gdx.utils.JsonValue;
import com.badlogic.gdx.utils.SharedLibraryLoader;
import com.github.strikerx3.jxinput.XInputDevice;
import net.torvald.getcpuname.GetCpuName;
import net.torvald.terrarum.controller.GdxControllerAdapter;
@@ -276,7 +277,7 @@ public class App implements ApplicationListener {
Gdx.gl20.glViewport(0, 0, width, height);
}
public static final int TICK_SPEED = 64;
public static final int TICK_SPEED = 60; // using 60 as it's a highly composite number
public static final float UPDATE_RATE = 1f / TICK_SPEED; // apparent framerate will be limited by update rate
private static float loadTimer = 0f;
@@ -418,7 +419,8 @@ public class App implements ApplicationListener {
Lwjgl3ApplicationConfiguration appConfig = new Lwjgl3ApplicationConfiguration();
//appConfig.useGL30 = false; // https://stackoverflow.com/questions/46753218/libgdx-should-i-use-gl30
if (processor.startsWith("Apple M")) appConfig.setOpenGLEmulation(Lwjgl3ApplicationConfiguration.GLEmulation.GL30, 3, 2);
if (SharedLibraryLoader.isMac)
appConfig.setOpenGLEmulation(Lwjgl3ApplicationConfiguration.GLEmulation.GL30, 3, 2);
appConfig.useVsync(getConfigBoolean("usevsync"));
appConfig.setResizable(false);