Merged
9 changes: 9 additions & 0 deletions .github/dependabot.yml
@@ -8,3 +8,12 @@ updates:
directory: "/"
schedule:
interval: "monthly"
ignore:
# The SQLite JDBC is provided by Spigot and, for now, Paper.
# To ensure it is still present even if Paper stops providing it,
# we need to declare it as a library.
# To prevent needlessly downloading (and loading) a second copy,
# we want to ensure that we are using the provided version.
- dependency-name: "org.xerial:sqlite-jdbc"
# Spigot and Spigot API updates are manual.
- dependency-name: "org.spigotmc:*"
2 changes: 2 additions & 0 deletions gradle/libs.versions.toml
@@ -9,6 +9,7 @@ folia-scheduler-wrapper = "v0.0.3"
errorprone-core = "2.45.0"
errorprone-gradle = "4.2.0"
slf4j = "2.0.17"
sqlite-jdbc = "3.49.1.0"

[libraries]
spigotapi = { module = "org.spigotmc:spigot-api", version.ref = "spigotapi" }
@@ -19,6 +20,7 @@ folia-scheduler-wrapper = { module = "com.github.NahuLD.folia-scheduler-wrapper:
errorprone-core = { module = "com.google.errorprone:error_prone_core", version.ref = "errorprone-core" }
errorprone-gradle = { module = "net.ltgt.gradle:gradle-errorprone-plugin", version.ref = "errorprone-gradle" }
slf4j-api = { module = "org.slf4j:slf4j-api", version.ref = "slf4j" }
sqlite-jdbc = { module = "org.xerial:sqlite-jdbc", version.ref = "sqlite-jdbc" }

[plugins]
paperweight = { id = "io.papermc.paperweight.userdev", version.ref = "paperweight" }
6 changes: 5 additions & 1 deletion plugin/build.gradle.kts
@@ -22,10 +22,14 @@ dependencies {
implementation(project(":openinvadapterspigot", configuration = SpigotReobf.ARTIFACT_CONFIG))
implementation(libs.planarwrappers)
implementation(libs.folia.scheduler.wrapper)
compileOnly(libs.sqlite.jdbc)
}

tasks.processResources {
expand("version" to version)
expand(
"version" to version,
"sqlite" to libs.sqlite.jdbc.get().version
)
}

tasks.jar {
10 changes: 10 additions & 0 deletions plugin/src/main/java/com/lishid/openinv/OpenInv.java
@@ -56,6 +56,7 @@
import java.util.Locale;
import java.util.UUID;
import java.util.function.Consumer;
import java.util.logging.Level;

/**
* The main class for OpenInv.
@@ -69,6 +70,10 @@ public class OpenInv extends FoliaWrappedJavaPlugin implements IOpenInv {
private PlayerLoader playerLoader;
private boolean isSpigot = false;

public PlayerLoader getPlayerLoader() {
return playerLoader;
}

@Override
public void reloadConfig() {
super.reloadConfig();
@@ -96,6 +101,11 @@ public boolean onCommand(
@Override
public void onDisable() {
inventoryManager.evictAll();
try {
playerLoader.getProfileStore().shutdown();
} catch (Exception e) {
getLogger().log(Level.WARNING, "Failed to shut down profile store correctly", e);
}
}

@Override
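For context, a minimal usage sketch of the accessors this file adds (`getPlayerLoader()` plus `PlayerLoader#getProfileStore()` and `ProfileStore#addProfile` from the changes below). The caller class and method are hypothetical, and obtaining the plugin instance via the standard Bukkit `JavaPlugin.getPlugin` accessor is an assumption, not something shown in this PR.

```java
// Hypothetical caller, not part of this PR: shows how another component could
// reach the new profile store through the accessors added here.
import com.lishid.openinv.OpenInv;
import com.lishid.openinv.util.profile.Profile;
import com.lishid.openinv.util.profile.ProfileStore;
import org.bukkit.entity.Player;
import org.bukkit.plugin.java.JavaPlugin;

final class ProfileStoreUsageSketch {

  void recordProfile(Player player) {
    // Standard Bukkit accessor; assumes OpenInv is loaded.
    OpenInv openInv = JavaPlugin.getPlugin(OpenInv.class);

    // Accessor chain introduced in this PR.
    ProfileStore store = openInv.getPlayerLoader().getProfileStore();

    // Queue the profile; batch-based stores flush it asynchronously a few seconds later.
    store.addProfile(new Profile(player));
  }
}
```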
140 changes: 69 additions & 71 deletions plugin/src/main/java/com/lishid/openinv/util/PlayerLoader.java
@@ -5,13 +5,16 @@
import com.google.errorprone.annotations.Keep;
import com.lishid.openinv.OpenInv;
import com.lishid.openinv.util.config.Config;
import com.lishid.openinv.util.profile.OfflinePlayerProfileStore;
import com.lishid.openinv.util.profile.Profile;
import com.lishid.openinv.util.profile.ProfileStore;
import com.lishid.openinv.util.profile.sqlite.SqliteProfileStore;
import org.bukkit.Bukkit;
import org.bukkit.OfflinePlayer;
import org.bukkit.entity.Player;
import org.bukkit.event.EventHandler;
import org.bukkit.event.Listener;
import org.bukkit.event.player.PlayerJoinEvent;
import org.bukkit.profile.PlayerProfile;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

@@ -33,7 +36,8 @@ public class PlayerLoader implements Listener {
private final @NotNull InventoryManager inventoryManager;
private final @NotNull InternalAccessor internalAccessor;
private final @NotNull Logger logger;
private final @NotNull Cache<String, PlayerProfile> lookupCache;
private final @NotNull Cache<String, Profile> lookupCache;
private @NotNull ProfileStore profileStore;

public PlayerLoader(
@NotNull OpenInv plugin,
@@ -46,10 +50,30 @@ public PlayerLoader(
this.config = config;
this.inventoryManager = inventoryManager;
this.internalAccessor = internalAccessor;
try {
SqliteProfileStore sqliteStore = new SqliteProfileStore(plugin);
sqliteStore.setup();
sqliteStore.tryImport();
this.profileStore = sqliteStore;
} catch (Exception e) {
this.profileStore = new OfflinePlayerProfileStore(logger);
}
this.logger = logger;
this.lookupCache = CacheBuilder.newBuilder().maximumSize(20).build();
}

public @NotNull ProfileStore getProfileStore() {
return profileStore;
}

public void setProfileStore(@NotNull ProfileStore profileStore) {
plugin.getLogger().log(
Level.INFO,
() -> "Setting profile store implementation to " + profileStore.getClass().getName()
);
this.profileStore = profileStore;
}

/**
* Load a {@link Player} from an {@link OfflinePlayer}. If the user has not played before or the default world for
* the server is not loaded, this will return {@code null}.
@@ -59,22 +83,20 @@ public PlayerLoader(
* @throws IllegalStateException if the server version is unsupported
*/
public @Nullable Player load(@NotNull OfflinePlayer offline) {
UUID key = offline.getUniqueId();

Player player = offline.getPlayer();
if (player != null) {
return player;
}

player = inventoryManager.getLoadedPlayer(key);
if (player != null) {
return player;
}

if (config.isOfflineDisabled() || !internalAccessor.isSupported()) {
return null;
}

player = inventoryManager.getLoadedPlayer(offline.getUniqueId());
if (player != null) {
return player;
}

if (Bukkit.isPrimaryThread()) {
return internalAccessor.getPlayerDataManager().loadPlayer(offline);
}
@@ -93,13 +115,6 @@ public PlayerLoader(
}

public @Nullable OfflinePlayer matchExact(@NotNull String name) {
// Warn if called on the main thread - if we resort to searching offline players, this may take several seconds.
if (Bukkit.getServer().isPrimaryThread()) {
logger.warning("Call to PlayerSearchCache#matchPlayer made on the main thread!");
logger.warning("This can cause the server to hang, potentially severely.");
logger.log(Level.WARNING, "Current stack trace", new Throwable("Current stack trace"));
}

OfflinePlayer player;

try {
@@ -123,9 +138,9 @@ public PlayerLoader(
}

// Cached offline match.
PlayerProfile cachedResult = lookupCache.getIfPresent(name);
if (cachedResult != null && cachedResult.getUniqueId() != null) {
player = Bukkit.getOfflinePlayer(cachedResult.getUniqueId());
Profile cachedResult = lookupCache.getIfPresent(name);
if (cachedResult != null) {
player = Bukkit.getOfflinePlayer(cachedResult.id());
// Ensure player is an existing player.
if (player.hasPlayedBefore() || player.isOnline()) {
return player;
@@ -135,10 +150,15 @@ public PlayerLoader(
}

// Exact offline match second - ensure offline access works when matchable users are online.
player = Bukkit.getServer().getOfflinePlayer(name);
Profile profile = profileStore.getProfileExact(name);
if (profile == null) {
return null;
}

player = Bukkit.getOfflinePlayer(profile.id());

if (player.hasPlayedBefore()) {
lookupCache.put(name, player.getPlayerProfile());
lookupCache.put(name, profile);
return player;
}

Expand All @@ -160,77 +180,55 @@ public PlayerLoader(
}

// Finally, inexact offline match.
float bestMatch = 0;
for (OfflinePlayer offline : Bukkit.getServer().getOfflinePlayers()) {
if (offline.getName() == null) {
// Loaded by UUID only, name has never been looked up.
continue;
}

float currentMatch = StringMetric.compareJaroWinkler(name, offline.getName());
Profile profile = getProfileStore().getProfileInexact(name);

if (currentMatch == 1.0F) {
return offline;
}

if (currentMatch > bestMatch) {
bestMatch = currentMatch;
player = offline;
}
if (profile == null) {
// No match found.
return null;
}

if (player != null) {
// If a match was found, store it.
lookupCache.put(name, player.getPlayerProfile());
// Get associated player and store match.
player = Bukkit.getOfflinePlayer(profile.id());
if (player.hasPlayedBefore()) {
lookupCache.put(name, profile);
return player;
}

// No players have ever joined the server.
return null;
}

@Keep
@EventHandler
private void onPlayerJoin(@NotNull PlayerJoinEvent event) {
plugin.getScheduler().runTaskLaterAsynchronously(() -> updateMatches(event), 7L);
}

private void updateMatches(@NotNull PlayerJoinEvent event) {
// Update profile store.
profileStore.addProfile(new Profile(event.getPlayer()));

// If player is not new, any cached values are valid.
if (event.getPlayer().hasPlayedBefore()) {
if (event.getPlayer().hasPlayedBefore() || lookupCache.size() == 0) {
return;
}

// New player may have a name that already points to someone else in lookup cache.
String name = event.getPlayer().getName();
lookupCache.invalidate(name);

// If the cache is empty, nothing to do. Don't hit scheduler.
if (lookupCache.size() == 0) {
return;
Iterator<Map.Entry<String, Profile>> iterator = lookupCache.asMap().entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, Profile> entry = iterator.next();
String oldMatch = entry.getValue().name();
String lookup = entry.getKey();
float oldMatchScore = StringMetric.compareJaroWinkler(lookup, oldMatch);
float newMatchScore = StringMetric.compareJaroWinkler(lookup, name);

// If new match exceeds old match, delete old match.
if (newMatchScore > oldMatchScore) {
iterator.remove();
}
}

plugin.getScheduler().runTaskLaterAsynchronously(
() -> {
Iterator<Map.Entry<String, PlayerProfile>> iterator = lookupCache.asMap().entrySet().iterator();
while (iterator.hasNext()) {
Map.Entry<String, PlayerProfile> entry = iterator.next();
String oldMatch = entry.getValue().getName();

// Shouldn't be possible - all profiles should be complete.
if (oldMatch == null) {
iterator.remove();
continue;
}

String lookup = entry.getKey();
float oldMatchScore = StringMetric.compareJaroWinkler(lookup, oldMatch);
float newMatchScore = StringMetric.compareJaroWinkler(lookup, name);

// If new match exceeds old match, delete old match.
if (newMatchScore > oldMatchScore) {
iterator.remove();
}
}
},
7L
);
}

}
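As a usage sketch of the reworked lookup path (hypothetical caller, not part of this PR): name matching can now query the SQLite-backed profile store, so it is kept off the main thread here, while `load` is called back on the primary thread, matching the primary-thread branch shown in the diff above. The scheduler calls below are plain Bukkit API rather than the plugin's Folia scheduler wrapper, which is an assumption made for brevity.

```java
// Hypothetical caller, not part of this PR.
import com.lishid.openinv.OpenInv;
import com.lishid.openinv.util.PlayerLoader;
import org.bukkit.Bukkit;
import org.bukkit.OfflinePlayer;
import org.bukkit.entity.Player;
import org.bukkit.plugin.java.JavaPlugin;

final class LookupSketch {

  void loadByName(String name) {
    OpenInv openInv = JavaPlugin.getPlugin(OpenInv.class);
    PlayerLoader loader = openInv.getPlayerLoader();

    // Profile lookups may hit the SQLite store, so keep them off the main thread.
    Bukkit.getScheduler().runTaskAsynchronously(openInv, () -> {
      OfflinePlayer match = loader.matchExact(name);
      if (match == null) {
        return;
      }
      // Hop back to the primary thread for the actual player load.
      Bukkit.getScheduler().runTask(openInv, () -> {
        Player player = loader.load(match);
        // ... use the loaded player ...
      });
    });
  }
}
```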
78 changes: 78 additions & 0 deletions plugin/src/main/java/com/lishid/openinv/util/profile/BatchProfileStore.java
@@ -0,0 +1,78 @@
package com.lishid.openinv.util.profile;

import com.github.jikoo.planarwrappers.scheduler.TickTimeUnit;
import me.nahu.scheduler.wrapper.WrappedJavaPlugin;
import me.nahu.scheduler.wrapper.runnable.WrappedRunnable;
import org.jetbrains.annotations.NotNull;

import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public abstract class BatchProfileStore implements ProfileStore {

private final Set<Profile> pending = Collections.synchronizedSet(new HashSet<>());
private final AtomicReference<WrappedRunnable> insertTask = new AtomicReference<>();
protected final @NotNull WrappedJavaPlugin plugin;

protected BatchProfileStore(@NotNull WrappedJavaPlugin plugin) {
this.plugin = plugin;
}

@Override
public void addProfile(@NotNull Profile profile) {
pending.add(profile);
buildBatch();
}

private void buildBatch() {
if (insertTask.compareAndSet(null, new WrappedRunnable() {
@Override
public void run() {
pushBatch();
}
})) {
try {
// Wait 5 seconds to accumulate other player data to reduce scheduler load on larger servers.
insertTask.get().runTaskLaterAsynchronously(plugin, TickTimeUnit.toTicks(5, TimeUnit.SECONDS));
} catch (IllegalStateException e) {
// If scheduling task fails, server is most likely shutting down.
insertTask.set(null);
}
}
}

private void pushBatch() {
Set<Profile> batch = new HashSet<>(pending);
// This is a bit roundabout but removes the risk of data loss.
pending.removeAll(batch);

// Push current batch.
pushBatch(batch);

WrappedRunnable running = insertTask.getAndSet(null);
if (running != null) {
running.cancel();
}

// If more profiles have been added, build another batch.
if (!pending.isEmpty()) {
buildBatch();
}
}

@Override
public void shutdown() {
WrappedRunnable wrappedRunnable = insertTask.get();
if (wrappedRunnable != null) {
wrappedRunnable.cancel();
}
pushBatch();
insertTask.set(null);
}

protected abstract void pushBatch(@NotNull Set<Profile> batch);

}
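To illustrate the contract, a minimal in-memory subclass (hypothetical, not part of this PR). Based on the call sites in PlayerLoader, it assumes that the remaining abstract methods of ProfileStore are `getProfileExact` and `getProfileInexact` returning a nullable Profile, and that Profile exposes a non-null `name()`; the prefix-based inexact match is a simplified stand-in for the Jaro-Winkler matching used elsewhere.

```java
// Hypothetical subclass, not part of this PR.
import com.lishid.openinv.util.profile.BatchProfileStore;
import com.lishid.openinv.util.profile.Profile;
import me.nahu.scheduler.wrapper.WrappedJavaPlugin;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;

import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

public class InMemoryProfileStore extends BatchProfileStore {

  private final Map<String, Profile> byName = new ConcurrentHashMap<>();

  public InMemoryProfileStore(@NotNull WrappedJavaPlugin plugin) {
    super(plugin);
  }

  @Override
  protected void pushBatch(@NotNull Set<Profile> batch) {
    // Invoked asynchronously roughly 5 seconds after the first addProfile of a batch.
    for (Profile profile : batch) {
      byName.put(profile.name().toLowerCase(Locale.ROOT), profile);
    }
  }

  @Override
  public @Nullable Profile getProfileExact(@NotNull String name) {
    return byName.get(name.toLowerCase(Locale.ROOT));
  }

  @Override
  public @Nullable Profile getProfileInexact(@NotNull String name) {
    // Simplified stand-in for fuzzy matching: first stored name with the query as a prefix.
    String query = name.toLowerCase(Locale.ROOT);
    for (Map.Entry<String, Profile> entry : byName.entrySet()) {
      if (entry.getKey().startsWith(query)) {
        return entry.getValue();
      }
    }
    return null;
  }
}
```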