How to print path during backtracking? - depth-first-search

I am currently working on a backtracking program and was asked to print the path for the result. Here's an example:
Imagine we are given a weighted graph, represented by an adjacency list g:
g = {
    "A": {"B": 6, "D": 1},
    "B": {"A": 6, "C": 5, "D": 2, "E": 2},
    "D": {"A": 1, "B": 2, "E": 2},
    "E": {"B": 2, "C": 5, "D": 2},
    "C": {"B": 5, "E": 5}
}
along with a start node "A" and a target node "C". Our goal is to find the path from start to target that maximizes the product of the edge weights. For this example, the best path is A -> B -> D -> E -> C, with a product of edges = 6 * 2 * 2 * 5 = 120. I have implemented a backtracking program that finds the maximum product, but I can't find a way to store the corresponding path in the class variable List<String> path. Can someone please help me finish that part? Below is my backtracking implementation:
import java.util.*;

public class Test {
    static final String START = "A";
    static final String TARGET = "C";
    List<String> path = new ArrayList<>();

    public static void main(String[] args) {
        Map<String, Map<String, Integer>> graph = getSimplerStaticData();
        System.out.println(getMaximumPathProduct(graph, START, TARGET));
    }

    private static int getMaximumPathProduct(Map<String, Map<String, Integer>> graph, String start, String target) {
        Set<String> seen = new HashSet<>();
        seen.add(start);
        return dfs(start, target, seen, graph, new LinkedList<>());
    }

    private static int dfs(String current, String target, Set<String> seen, Map<String, Map<String, Integer>> graph, List<String> subPath) {
        if (target.equals(current)) {
            return 1;
        }
        int res = 0;
        Map<String, Integer> neighbors = graph.get(current);
        for (String neighbor : neighbors.keySet()) {
            if (!seen.contains(neighbor)) {
                seen.add(neighbor);
                int distance = neighbors.get(neighbor);
                res = Math.max(res, distance * dfs(neighbor, target, seen, graph, subPath));
                seen.remove(neighbor);
            }
        }
        return res;
    }

    private static Map<String, Map<String, Integer>> getSimplerStaticData() {
        Map<String, Map<String, Integer>> res = new HashMap<>();
        Map<String, Integer> value1 = new HashMap<>();
        value1.put("B", 6);
        value1.put("D", 1);
        res.put("A", value1);
        Map<String, Integer> value2 = new HashMap<>();
        value2.put("A", 6);
        value2.put("D", 2);
        value2.put("E", 2);
        value2.put("C", 5);
        res.put("B", value2);
        Map<String, Integer> value3 = new HashMap<>();
        value3.put("B", 5);
        value3.put("E", 5);
        res.put("C", value3);
        Map<String, Integer> value4 = new HashMap<>();
        value4.put("A", 1);
        value4.put("B", 2);
        value4.put("E", 2);
        res.put("D", value4);
        Map<String, Integer> value5 = new HashMap<>();
        value5.put("B", 2);
        value5.put("C", 5);
        value5.put("D", 2);
        res.put("E", value5);
        return res;
    }
}

Here is a prototype of what you want to achieve (I have not tested it in many situations, nor tried to optimize it, but it may help you get started):
import java.util.*;

public class Test {
    static final String START = "A";
    static final String TARGET = "C";
    static List<String> path = new ArrayList<>();

    public static void main(String[] args) {
        Map<String, Map<String, Integer>> graph = getSimplerStaticData();
        System.out.println(getMaximumPathProduct(graph, START, TARGET));
        System.out.println(path);
    }

    private static int getMaximumPathProduct(Map<String, Map<String, Integer>> graph, String start, String target) {
        Set<String> seen = new HashSet<>();
        seen.add(start);
        return dfs(start, target, seen, graph, path);
    }

    // Returns the maximum product of edge weights from 'current' to 'target' and
    // fills 'bestSuffix' with the corresponding path, starting at 'current'.
    // If the target is unreachable it returns 0 and leaves 'bestSuffix' untouched.
    private static int dfs(String current, String target, Set<String> seen,
                           Map<String, Map<String, Integer>> graph, List<String> bestSuffix) {
        if (target.equals(current)) {
            bestSuffix.add(current);
            return 1;
        }
        int res = 0;
        Map<String, Integer> neighbors = graph.get(current);
        for (String neighbor : neighbors.keySet()) {
            if (!seen.contains(neighbor)) {
                seen.add(neighbor);
                List<String> suffix = new ArrayList<>();
                int newDistance = neighbors.get(neighbor) * dfs(neighbor, target, seen, graph, suffix);
                // with positive weights, newDistance > 0 only if the target was
                // reached, so 'suffix' holds a complete path from 'neighbor'
                if (newDistance > res) {
                    res = newDistance;
                    bestSuffix.clear();
                    bestSuffix.add(current);
                    bestSuffix.addAll(suffix);
                }
                seen.remove(neighbor);
            }
        }
        return res;
    }
    private static Map<String, Map<String, Integer>> getSimplerStaticData() {
        Map<String, Map<String, Integer>> res = new HashMap<>();
        Map<String, Integer> value1 = new HashMap<>();
        value1.put("B", 6);
        value1.put("D", 1);
        res.put("A", value1);
        Map<String, Integer> value2 = new HashMap<>();
        value2.put("A", 6);
        value2.put("D", 2);
        value2.put("E", 2);
        value2.put("C", 5);
        res.put("B", value2);
        Map<String, Integer> value3 = new HashMap<>();
        value3.put("B", 5);
        value3.put("E", 5);
        res.put("C", value3);
        Map<String, Integer> value4 = new HashMap<>();
        value4.put("A", 1);
        value4.put("B", 2);
        value4.put("E", 2);
        res.put("D", value4);
        Map<String, Integer> value5 = new HashMap<>();
        value5.put("B", 2);
        value5.put("C", 5);
        value5.put("D", 2);
        res.put("E", value5);
        return res;
    }
}
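With the sample graph, this should print 120 followed by [A, B, D, E, C].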

Related

How to get all keys whose values are null in Java 8 using Map

I was going through How to remove a key from HashMap while iterating over it?, but my requirement is a bit different.
import java.util.*;
import java.util.stream.*;

class Main {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("RED", "#FF0000");
        hashMap.put("BLACK", null);
        hashMap.put("BLUE", "#0000FF");
        hashMap.put("GREEN", "#008000");
        hashMap.put("WHITE", null);
        // I want all keys whose value is null
        List<String> collect = hashMap.values()
                .stream()
                .filter(e -> e == null)
                .collect(Collectors.toList());
        System.out.println(collect);
        // Desired result: BLACK, WHITE in a list
    }
}
Try this:
import java.util.*;
import java.util.stream.*;

class Main {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("RED", "#FF0000");
        hashMap.put("BLACK", null);
        hashMap.put("BLUE", "#0000FF");
        hashMap.put("GREEN", "#008000");
        hashMap.put("WHITE", null);
        // get all keys whose value is null
        List<String> collect = hashMap.keySet()
                .stream()
                .filter(e -> Objects.isNull(hashMap.get(e)))
                .collect(Collectors.toList());
        System.out.println(collect);
        // Result: BLACK, WHITE in a list
    }
}
As pointed out in the comments, you can try this as well:
import java.util.*;
import java.util.stream.*;

class Main {
    public static void main(String[] args) {
        Map<String, String> hashMap = new HashMap<>();
        hashMap.put("RED", "#FF0000");
        hashMap.put("BLACK", null);
        hashMap.put("BLUE", "#0000FF");
        hashMap.put("GREEN", "#008000");
        hashMap.put("WHITE", null);
        // get all keys whose value is null
        List<String> collect = hashMap.entrySet()
                .stream()
                .filter(e -> Objects.isNull(e.getValue()))
                .map(e -> e.getKey())
                .collect(Collectors.toList());
        System.out.println(collect);
        // Result: BLACK, WHITE in a list
    }
}
This is more efficient than the first solution, since it iterates the entries directly instead of performing a map lookup for every key.
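As a small footnote, the e -> e.getKey() step can also be written as a method reference, which many find more idiomatic:

List<String> collect = hashMap.entrySet()
        .stream()
        .filter(e -> Objects.isNull(e.getValue()))
        .map(Map.Entry::getKey)
        .collect(Collectors.toList());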

Java 8: generate a Map containing another Map

How do I achieve this using Java 8?
I have a CSV in the format below, and from it I want to populate a Map<String, Map<String, String>>,
where the outer map is keyed by the distinct Type values (scriptId and transactionType), and each inner map uses the second column as its keys and the third column as its values, like this:
{
    scriptId = {
        TATA = TATA Moters,
        REL = Reliance Industries Ltd,
        LNT = L&T,
        SBI = State Bank of India
    },
    transactionType = {
        P = B,
        S = S
    }
}
Content of CSV File
Type,ArcesiumValue,GICValue
scriptId,TATA,TATA Moters
scriptId,REL,Reliance Industries Ltd
scriptId,LNT,L&T
scriptId,SBI,State Bank of India
transactionType,P,B
transactionType,S,S
How do I generate this using Java 8? Here is my current attempt:
public void loadReferenceData() throws IOException {
    List<Map<String, Map<String, String>>> cache = Files.lines(Paths.get("data/referenceDataMapping.csv")).skip(1)
            .map(mapRefereneData).collect(Collectors.toList());
    System.out.println(cache);
}

public static Function<String, Map<String, Map<String, String>>> mapRefereneData = (line) -> {
    String[] sp = line.split(",");
    Map<String, Map<String, String>> cache = new HashMap<String, Map<String, String>>();
    try {
        if (cache.containsKey(sp[0])) {
            cache.get(sp[0]).put(sp[1], sp[2]);
        } else {
            Map<String, String> map = new HashMap<String, String>();
            map.put(sp[1], sp[2]);
            cache.put(sp[0], map);
        }
    } catch (NumberFormatException e) {
        e.printStackTrace();
    }
    return cache;
};
Well it is much simpler to use two Collectors:
Map<String, Map<String, String>> groupCSV = Files.lines(Paths.get("..."))
        .skip(1L).map(l -> l.split(","))
        .collect(Collectors.groupingBy(a -> a[0], Collectors.toMap(a -> a[1], a -> a[2])));
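For reference, here is a self-contained sketch of that pipeline (the wrapping class and the println are additions; the CSV path is the one from the question):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ReferenceDataLoader {
    public static void main(String[] args) throws IOException {
        try (Stream<String> lines = Files.lines(Paths.get("data/referenceDataMapping.csv"))) {
            // skip the header, split each row, group by the Type column, and
            // collect each group's (ArcesiumValue -> GICValue) pairs into a map
            Map<String, Map<String, String>> groupCSV = lines
                    .skip(1L)
                    .map(l -> l.split(","))
                    .collect(Collectors.groupingBy(a -> a[0],
                            Collectors.toMap(a -> a[1], a -> a[2])));
            // prints {transactionType={P=B, S=S}, scriptId={TATA=TATA Moters, ...}}
            System.out.println(groupCSV);
        }
    }
}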

Multiple input files, output each file's result on one line in one file (Hadoop: MapReduce)

I am stuck trying to get each file's word count onto a single line. I want to output all files' results to one file, with each file's result on its own line.
Expected output.txt format
file1 1, 2, 3, 4, …, 100
file2 5, 2, 9, 6, …, 30
Currently, the output has each file's word count summed together:
file1 123,22,31,...,100
file2 123,22,31,...,100
run():
    MultipleInputs.addInputPath(job, in_path1, TextInputFormat.class, Map.class);
    MultipleInputs.addInputPath(job, in_path2, TextInputFormat.class, Map.class);
Map:
    context.write(new Text("file1"), output);
    context.write(new Text("file2"), output);
Reduce:
    context.write(new Text("file1"), new Text(sp.toString()));
    context.write(new Text("file2"), new Text(sp.toString()));
Map
public static class Map extends Mapper<LongWritable, Text, Text, Text> {
    private static final HashMap<String, Object> counter = new HashMap<>();
    private Text output = new Text();

    private String mapToString(HashMap<String, Object> map) {
        StringBuilder sb = new StringBuilder();
        Iterator<Entry<String, Object>> iter = map.entrySet().iterator();
        while (iter.hasNext()) {
            Entry<String, Object> entry = iter.next();
            sb.append(entry.getKey());
            sb.append('=');
            sb.append(entry.getValue().toString());
            if (iter.hasNext()) {
                sb.append(';').append(' ');
            }
        }
        return sb.toString();
    }

    public void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
        // TODO: Get filename
        String line = value.toString();
        StringTokenizer tokenizer = new StringTokenizer(line);
        while (tokenizer.hasMoreTokens()) {
            String token = tokenizer.nextToken();
            if (!counter.containsKey(token)) {
                counter.put(token, 1);
            } else {
                counter.put(token, (Integer) counter.get(token) + 1);
            }
        }
        output.set(mapToString(counter));
        context.write(new Text("filename1"), output);
    }
}
Reduce
public static class Reduce extends Reducer<Text, Text, Text, Text> {
    public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
        int number = 0;
        System.out.println(key);
        for (Text val : values) { // val line record
            String[] input = val.toString().split(";\\s");
            for (int i = 0; i < input.length; i++) {
                String[] temp = input[i].split("=");
                String topValue = temp[0];
                topValue = temp[0].replaceAll("[^a-zA-Z0-9]", "");
                topValue = topValue.toLowerCase();
                if (resultMap.containsKey(topValue)) {
                    int original = resultMap.get(topValue);
                    int sum = original + Integer.parseInt(temp[1]);
                    resultMap.put(topValue, sum);
                }
            }
        }
    }

    @Override
    protected void cleanup(Context context) throws IOException,
            InterruptedException {
        StringBuilder sp = new StringBuilder();
        System.out.println("MapSize: " + resultMap);
        int i = 0;
        Iterator iterator = resultMap.entrySet().iterator();
        while (iterator.hasNext()) {
            Entry me2 = (Entry) iterator.next();
            //System.out.println("key : " + me2.getKey());
            sp.append(me2.getKey());
            sp.append(":");
            sp.append(me2.getValue());
            System.out.println(me2.getValue());
            sp.append(",");
        }
        context.write(new Text("file1"), new Text(sp.toString()));
        context.write(new Text("file2"), new Text(sp.toString()));
    }
}
I am stuck on the two files' word counts being combined. I want to print each file's word count on its own line.
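About the // TODO: Get filename in the mapper: instead of hard-coding "file1"/"file2", a mapper can usually derive the name of the file it is reading from its input split. Below is a minimal sketch of that idea; note it assumes the splits are plain FileSplits — with MultipleInputs the split is often wrapped in an internal TaggedInputSplit, in which case the cast fails and a reflection-based workaround is needed.

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public static class Map extends Mapper<LongWritable, Text, Text, Text> {
    private String fileName = "unknown";

    @Override
    protected void setup(Context context) {
        // Derive the current input file's name once per split.
        InputSplit split = context.getInputSplit();
        if (split instanceof FileSplit) {
            fileName = ((FileSplit) split).getPath().getName();
        }
    }

    // ...then in map(), key the output by the real file name:
    // context.write(new Text(fileName), output);
}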

Performance issue on JavaFX TableView with complex TableColumn

I'm trying to create a JavaFX TableView with "tower-shaped" (nested) columns, but the performance is very bad when there are more than 1000 columns in the TableView.
Here is my code:
public class PaneDemo extends Application {
    private TableView<String> table = new TableView<>();
    private Long timeStart;
    private Long timeEnd;

    public static void main(String[] args) {
        launch(args);
    }

    private static int getTotal(int layer) {
        int total = 0;
        int start = 1;
        for (int i = 0; i < layer - 1; i++) {
            start *= 5;
            total += start;
        }
        System.out.println("Total Columns: " + (total + start * 5));
        return total;
    }

    @Override
    public void start(Stage stage) {
        Scene scene = new Scene(new Group());
        AnchorPane anchorPane = new AnchorPane();
        anchorPane.setPrefWidth(900);
        anchorPane.setPrefHeight(600);
        table.setColumnResizePolicy(TableView.UNCONSTRAINED_RESIZE_POLICY);
        new Thread(() -> {
            timeStart = System.currentTimeMillis();
            // init first layer
            TableColumn<String, String> Test1 = new TableColumn<>("Test1");
            TableColumn<String, String> Test2 = new TableColumn<>("Test2");
            TableColumn<String, String> Test3 = new TableColumn<>("Test3");
            TableColumn<String, String> Test4 = new TableColumn<>("Test4");
            TableColumn<String, String> Test5 = new TableColumn<>("Test5");
            Queue<TableColumn<String, ?>> queue = new LinkedList<>();
            table.getColumns().addAll(Test1, Test2, Test3, Test4, Test5);
            table.getItems().add("test");
            queue.addAll(table.getColumns());
            int index = 0;
            // set the layer of the column tower
            int temp = getTotal(4);
            while (index < temp) {
                TableColumn<String, ?> root = queue.poll();
                TableColumn<String, String> test1 = new TableColumn<>("test1");
                TableColumn<String, String> test2 = new TableColumn<>("test2");
                TableColumn<String, String> test3 = new TableColumn<>("test3");
                TableColumn<String, String> test4 = new TableColumn<>("test4");
                TableColumn<String, String> test5 = new TableColumn<>("test5");
                root.getColumns().addAll(test1, test2, test3, test4, test5);
                queue.addAll(root.getColumns());
                index++;
            }
            while (!queue.isEmpty()) {
                generateCellFactory((TableColumn<String, String>) queue.poll());
            }
            table.prefHeightProperty().bind(anchorPane.heightProperty());
            table.prefWidthProperty().bind(anchorPane.widthProperty());
            anchorPane.getChildren().add(table);
            ((Group) scene.getRoot()).getChildren().addAll(anchorPane);
            stage.setScene(scene);
            stage.show();
            Platform.runLater(() -> {
                timeEnd = System.currentTimeMillis();
                System.out.println("Layout Time: " + (timeEnd - timeStart) + "ms");
            });
        }).run();
    }

    private <T> void generateCellFactory(TableColumn<T, String> column) {
        column.setCellFactory(cell -> {
            return new TableCell<T, String>() {
                @Override
                protected void updateItem(String item, boolean empty) {
                    super.updateItem(item, empty);
                    setText("Test");
                }
            };
        });
    }
}
On my PC the performance looks like this:
Total Columns: 780
Layout Time: 9810ms
Total Columns: 3905
Layout Time: 43920ms
Is there any way I can improve the performance? Or is there some pagination mechanism that can be used with TableColumn?

Java 8 Stream: convert List<File> to Map<Integer, List<File>>

I have the below code as a traditional Java loop and would like to use a Java 8 Stream instead.
I have a list of files sorted by file size. I group these files so that the total size of each group does not exceed the given maximum size, and put the groups in a Map with keys 1, 2, 3, and so on. Here is the code.
List<File> allFilesSortedBySize = getListOfFiles();
Map<Integer, List<File>> filesGroupedByMaxSizeMap = new HashMap<Integer, List<File>>();
double totalLength = 0L;
int count = 0;
List<File> filesWithSizeTotalMaxSize = Lists.newArrayList();
// group the files to be zipped together as per maximum allowable size in a map
for (File file : allFilesSortedBySize) {
    long sizeInBytes = file.length();
    double sizeInMb = (double) sizeInBytes / (1024 * 1024);
    totalLength = totalLength + sizeInMb;
    if (totalLength <= maxSize) {
        filesWithSizeTotalMaxSize.add(file);
    } else {
        count = count + 1;
        filesGroupedByMaxSizeMap.put(count, filesWithSizeTotalMaxSize);
        filesWithSizeTotalMaxSize = Lists.newArrayList();
        filesWithSizeTotalMaxSize.add(file);
        totalLength = sizeInMb;
    }
}
filesGroupedByMaxSizeMap.put(count + 1, filesWithSizeTotalMaxSize);
return filesGroupedByMaxSizeMap;
After reading around, I found a solution using Collectors.groupingBy instead.
Code using a Java 8 lambda expression:
private final long MB = 1024 * 1024;

private Map<Integer, List<File>> grouping(List<File> files, long maxSize) {
    AtomicInteger group = new AtomicInteger(0);
    AtomicLong groupSize = new AtomicLong();
    return files.stream().collect(groupingBy((file) -> {
        if (groupSize.addAndGet(file.length()) <= maxSize * MB) {
            return group.get() == 0 ? group.incrementAndGet() : group.get();
        }
        groupSize.set(file.length());
        return group.incrementAndGet();
    }));
}
Code provided by @Holger, which frees you from checking whether group equals 0:
private static final long MB = 1024 * 1024;

private Map<Integer, List<File>> grouping(List<File> files, long maxSize) {
    AtomicInteger group = new AtomicInteger(0);
    // force group to start at 1 even if the first file is empty
    AtomicLong groupSize = new AtomicLong(maxSize * MB + 1);
    return files.stream().collect(groupingBy((file) -> {
        if (groupSize.addAndGet(file.length()) <= maxSize * MB) {
            return group.get();
        }
        groupSize.set(file.length());
        return group.incrementAndGet();
    }));
}
Code using an anonymous class:
Inspired by @Holger's point that all "solutions" using a grouping function that modifies external state are hacks abusing the API, you can instead use an anonymous class to keep the grouping state inside the function itself:
private static final long MB = 1024 * 1024;

private Map<Integer, List<File>> grouping(List<File> files, long maxSize) {
    return files.stream().collect(groupingBy(groupSize(maxSize)));
}

private Function<File, Integer> groupSize(final long maxSize) {
    long maxBytesSize = maxSize * MB;
    return new Function<File, Integer>() {
        private int group;
        private long groupSize = maxBytesSize + 1;

        @Override
        public Integer apply(File file) {
            return hasRemainingFor(file) ? current(file) : next(file);
        }

        private boolean hasRemainingFor(File file) {
            return (groupSize += file.length()) <= maxBytesSize;
        }

        private int next(File file) {
            groupSize = file.length();
            return ++group;
        }

        private int current(File file) {
            return group;
        }
    };
}
Test
import org.junit.jupiter.api.Test;

import java.io.File;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;

import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static java.util.stream.Collectors.groupingBy;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

/**
 * Created by holi on 3/24/17.
 */
public class StreamGroupingTest {
    private final File FILE_1MB = file(1);
    private final File FILE_2MB = file(2);
    private final File FILE_3MB = file(3);

    @Test
    void eachFileInIndividualGroupIfEachFileSizeGreaterThanMaxSize() {
        Map<Integer, List<File>> groups = grouping(asList(FILE_2MB, FILE_3MB), 1);
        assertThat(groups.size(), equalTo(2));
        assertThat(groups.get(1), equalTo(singletonList(FILE_2MB)));
        assertThat(groups.get(2), equalTo(singletonList(FILE_3MB)));
    }

    @Test
    void allFilesInAGroupIfTotalSizeOfFilesLessThanOrEqualMaxSize() {
        Map<Integer, List<File>> groups = grouping(asList(FILE_2MB, FILE_3MB), 5);
        assertThat(groups.size(), equalTo(1));
        assertThat(groups.get(1), equalTo(asList(FILE_2MB, FILE_3MB)));
    }

    @Test
    void allNeighboringFilesInAGroupThatTotalOfTheirSizeLessThanOrEqualMaxSize() {
        Map<Integer, List<File>> groups = grouping(asList(FILE_1MB, FILE_2MB, FILE_3MB), 3);
        assertThat(groups.size(), equalTo(2));
        assertThat(groups.get(1), equalTo(asList(FILE_1MB, FILE_2MB)));
        assertThat(groups.get(2), equalTo(singletonList(FILE_3MB)));
    }

    @Test
    void eachFileInIndividualGroupIfTheFirstFileAndTotalOfEachNeighboringFilesSizeGreaterThanMaxSize() {
        Map<Integer, List<File>> groups = grouping(asList(FILE_2MB, FILE_1MB, FILE_3MB), 2);
        assertThat(groups.size(), equalTo(3));
        assertThat(groups.get(1), equalTo(singletonList(FILE_2MB)));
        assertThat(groups.get(2), equalTo(singletonList(FILE_1MB)));
        assertThat(groups.get(3), equalTo(singletonList(FILE_3MB)));
    }

    @Test
    void theFirstEmptyFileInGroup1() throws Throwable {
        File emptyFile = file(0);
        Map<Integer, List<File>> groups = grouping(singletonList(emptyFile), 2);
        assertThat(groups.get(1), equalTo(singletonList(emptyFile)));
    }

    private static final long MB = 1024 * 1024;

    private Map<Integer, List<File>> grouping(List<File> files, long maxSize) {
        AtomicInteger group = new AtomicInteger(0);
        AtomicLong groupSize = new AtomicLong(maxSize * MB + 1);
        return files.stream().collect(groupingBy((file) -> {
            if (groupSize.addAndGet(file.length()) <= maxSize * MB) {
                return group.get();
            }
            groupSize.set(file.length());
            return group.incrementAndGet();
        }));
    }

    private Function<File, Integer> groupSize(final long maxSize) {
        long maxBytesSize = maxSize * MB;
        return new Function<File, Integer>() {
            private int group;
            private long groupSize = maxBytesSize + 1;

            @Override
            public Integer apply(File file) {
                return hasRemainingFor(file) ? current(file) : next(file);
            }

            private boolean hasRemainingFor(File file) {
                return (groupSize += file.length()) <= maxBytesSize;
            }

            private int next(File file) {
                groupSize = file.length();
                return ++group;
            }

            private int current(File file) {
                return group;
            }
        };
    }

    private File file(int sizeOfMB) {
        return new File(String.format("%dMB file", sizeOfMB)) {
            @Override
            public long length() {
                return sizeOfMB * MB;
            }

            @Override
            public boolean equals(Object obj) {
                File that = (File) obj;
                return length() == that.length();
            }
        };
    }
}
Since the processing of each element depends heavily on the processing of the previous one, this task is not suitable for streams. You can still achieve it using a custom collector, but the implementation would be much more complicated than the loop solution.
In other words, there is no improvement when you rewrite this as a stream operation. Stay with the loop.
However, there are still some things you can improve.
List<File> allFilesSortedBySize = getListOfFiles();
// get maxSize in bytes ONCE, instead of converting EACH size to MiB
long maxSizeBytes = (long) (maxSize * 1024 * 1024);
// use the "diamond operator"
Map<Integer, List<File>> filesGroupedByMaxSizeMap = new HashMap<>();
// start with the "create new list" condition to avoid code duplication
long totalLength = maxSizeBytes;
// count is obsolete, the map maintains a size
// the initial "totalLength = maxSizeBytes" forces creating a new list within the loop
List<File> filesWithSizeTotalMaxSize = null;
for (File file : allFilesSortedBySize) {
    long length = file.length();
    if (maxSizeBytes - totalLength <= length) {
        filesWithSizeTotalMaxSize = new ArrayList<>(); // no utility method needed
        // store each list immediately, so no action after the loop is needed
        filesGroupedByMaxSizeMap.put(filesGroupedByMaxSizeMap.size() + 1,
                filesWithSizeTotalMaxSize);
        totalLength = 0;
    }
    totalLength += length;
    filesWithSizeTotalMaxSize.add(file);
}
return filesGroupedByMaxSizeMap;
You may further replace
    filesWithSizeTotalMaxSize = new ArrayList<>();
    filesGroupedByMaxSizeMap.put(filesGroupedByMaxSizeMap.size() + 1,
            filesWithSizeTotalMaxSize);
with
    filesWithSizeTotalMaxSize = filesGroupedByMaxSizeMap.computeIfAbsent(
            filesGroupedByMaxSizeMap.size() + 1, x -> new ArrayList<>());
but opinions may differ on whether this is an improvement.
The simplest solution to the problem I could think of is to use an AtomicLong wrapper for the running total size and an AtomicInteger wrapper for the group index. These have some useful methods for performing basic arithmetic operations, which are very handy in this particular case.
List<File> files = getListOfFiles();
AtomicLong length = new AtomicLong();
AtomicInteger index = new AtomicInteger(1);
long maxLength = SOME_ARBITRARY_NUMBER;

Map<Integer, List<File>> collect = files.stream().collect(Collectors.groupingBy(
        file -> {
            if (length.addAndGet(file.length()) <= maxLength) {
                return index.get();
            }
            length.set(file.length());
            return index.incrementAndGet();
        }
));
return collect;
Basically, Collectors.groupingBy does the work you intended.
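One caveat that applies to all of the groupingBy variants above: they mutate state shared across elements, so they are only correct on a sequential stream; on a parallel stream the group bookkeeping would break down.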
